From b53f354763f96d81ce15d7bded6f1bfd97aee68b Mon Sep 17 00:00:00 2001
From: Jan Hentschel
Date: Wed, 25 Jan 2017 19:38:26 +0100
Subject: [PATCH] HBASE-17532 Replaced explicit type with diamond operator

Signed-off-by: Michael Stack
---
 .../hadoop/hbase/HColumnDescriptor.java | 11 +- .../org/apache/hadoop/hbase/HRegionInfo.java | 2 +- .../apache/hadoop/hbase/HTableDescriptor.java | 19 ++-- .../hadoop/hbase/MetaTableAccessor.java | 30 +++-- .../org/apache/hadoop/hbase/ServerLoad.java | 4 +- .../apache/hadoop/hbase/client/Append.java | 2 +- .../hadoop/hbase/client/AsyncProcess.java | 11 +- .../hbase/client/AsyncRequestFutureImpl.java | 27 ++--- .../client/AsyncRpcRetryingCallerFactory.java | 2 +- .../hadoop/hbase/client/BatchErrors.java | 10 +- .../client/ClientAsyncPrefetchScanner.java | 4 +- .../hadoop/hbase/client/ClientScanner.java | 7 +- .../hbase/client/ClusterStatusListener.java | 2 +- .../client/ConnectionImplementation.java | 11 +- .../apache/hadoop/hbase/client/Delete.java | 10 +- .../org/apache/hadoop/hbase/client/Get.java | 15 ++- .../hadoop/hbase/client/HBaseAdmin.java | 32 +++--- .../hadoop/hbase/client/HRegionLocator.java | 2 +- .../apache/hadoop/hbase/client/HTable.java | 26 ++--- .../hbase/client/HTableMultiplexer.java | 14 +-- .../apache/hadoop/hbase/client/Increment.java | 5 +- .../hbase/client/MetricsConnection.java | 2 +- .../hadoop/hbase/client/MultiAction.java | 2 +- .../hadoop/hbase/client/MultiResponse.java | 2 +- .../hbase/client/MultiServerCallable.java | 2 +- .../apache/hadoop/hbase/client/Mutation.java | 20 ++-- .../hbase/client/OperationWithAttributes.java | 2 +- .../client/PreemptiveFastFailInterceptor.java | 6 +- .../org/apache/hadoop/hbase/client/Put.java | 6 +- .../apache/hadoop/hbase/client/Result.java | 15 +-- .../ResultBoundedCompletionService.java | 2 +- .../RetriesExhaustedWithDetailsException.java | 4 +- .../hbase/client/ReversedScannerCallable.java | 2 +- .../client/RpcRetryingCallerFactory.java | 4 +- .../hbase/client/RpcRetryingCallerImpl.java | 3 +- .../RpcRetryingCallerWithReadReplicas.java | 2 +- .../org/apache/hadoop/hbase/client/Scan.java | 16 ++- .../client/ScannerCallableWithReplicas.java | 6 +- .../hbase/client/ServerStatisticTracker.java | 3 +- .../client/backoff/ServerStatistics.java | 3 +- .../client/metrics/ServerSideScanMetrics.java | 2 +- .../client/replication/ReplicationAdmin.java | 10 +- .../replication/ReplicationSerDeHelper.java | 4 +- .../hadoop/hbase/filter/CompareFilter.java | 2 +- .../hbase/filter/DependentColumnFilter.java | 2 +- .../hadoop/hbase/filter/FilterList.java | 2 +- ...FirstKeyValueMatchingQualifiersFilter.java | 2 +- .../hadoop/hbase/filter/FuzzyRowFilter.java | 11 +- .../hbase/filter/MultiRowRangeFilter.java | 6 +- .../filter/MultipleColumnPrefixFilter.java | 2 +- .../hadoop/hbase/filter/ParseFilter.java | 14 +-- .../hadoop/hbase/filter/TimestampsFilter.java | 6 +- .../hbase/ipc/NettyRpcDuplexHandler.java | 2 +- .../hadoop/hbase/protobuf/ProtobufUtil.java | 9 +- .../hadoop/hbase/quotas/QuotaFilter.java | 2 +- .../hadoop/hbase/quotas/QuotaRetriever.java | 2 +- .../hbase/quotas/QuotaSettingsFactory.java | 4 +- .../replication/ReplicationPeerConfig.java | 4 +- .../replication/ReplicationPeerZKImpl.java | 2 +- .../replication/ReplicationPeersZKImpl.java | 10 +- .../replication/ReplicationQueueInfo.java | 2 +- .../replication/ReplicationQueuesZKImpl.java | 6 +- .../replication/ReplicationTableBase.java | 8 +- .../replication/ReplicationTrackerZKImpl.java | 7 +- ...TableBasedReplicationQueuesClientImpl.java | 2 +-
.../TableBasedReplicationQueuesImpl.java | 2 +- .../hadoop/hbase/security/SecurityInfo.java | 2 +- .../security/access/AccessControlClient.java | 2 +- .../security/access/AccessControlUtil.java | 8 +- .../security/visibility/Authorizations.java | 2 +- .../security/visibility/VisibilityClient.java | 8 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 21 ++-- .../shaded/protobuf/RequestConverter.java | 3 +- .../shaded/protobuf/ResponseConverter.java | 6 +- .../org/apache/hadoop/hbase/util/PoolMap.java | 18 +-- .../apache/hadoop/hbase/util/Writables.java | 2 +- .../hadoop/hbase/zookeeper/HQuorumPeer.java | 2 +- .../hbase/zookeeper/InstancePending.java | 2 +- .../hbase/zookeeper/MetaTableLocator.java | 8 +- .../hbase/zookeeper/PendingWatcher.java | 2 +- .../hbase/zookeeper/RecoverableZooKeeper.java | 4 +- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 30 +++-- .../hbase/zookeeper/ZooKeeperWatcher.java | 7 +- .../TestInterfaceAudienceAnnotations.java | 11 +- .../hadoop/hbase/client/TestAsyncProcess.java | 42 +++---- .../hbase/client/TestClientNoCluster.java | 8 +- .../hbase/filter/TestKeyOnlyFilter.java | 2 +- .../hbase/ipc/TestHBaseRpcControllerImpl.java | 2 +- .../hadoop/hbase/util/BuilderStyleTest.java | 2 +- .../hbase/zookeeper/TestInstancePending.java | 4 +- .../org/apache/hadoop/hbase/CellUtil.java | 2 +- .../org/apache/hadoop/hbase/ChoreService.java | 10 +- .../hadoop/hbase/CompoundConfiguration.java | 7 +- .../org/apache/hadoop/hbase/KeyValue.java | 6 +- .../org/apache/hadoop/hbase/KeyValueUtil.java | 2 +- .../hadoop/hbase/NamespaceDescriptor.java | 6 +- .../org/apache/hadoop/hbase/ServerName.java | 2 +- .../org/apache/hadoop/hbase/TableName.java | 2 +- .../java/org/apache/hadoop/hbase/TagUtil.java | 8 +- .../hbase/io/BoundedByteBufferPool.java | 2 +- .../hbase/io/ByteBufferListOutputStream.java | 4 +- .../hadoop/hbase/io/ByteBufferPool.java | 2 +- .../hadoop/hbase/io/crypto/Encryption.java | 5 +- .../io/encoding/BufferedDataBlockEncoder.java | 2 +- .../hbase/io/encoding/RowIndexCodecV1.java | 2 +- .../hbase/io/encoding/RowIndexSeekerV1.java | 2 +- .../hadoop/hbase/io/util/LRUDictionary.java | 2 +- .../hadoop/hbase/io/util/StreamUtils.java | 12 +- .../hadoop/hbase/nio/SingleByteBuff.java | 2 +- .../apache/hadoop/hbase/security/User.java | 3 +- .../hadoop/hbase/security/UserProvider.java | 2 +- .../hadoop/hbase/trace/SpanReceiverHost.java | 2 +- .../hadoop/hbase/types/StructBuilder.java | 2 +- .../apache/hadoop/hbase/util/ArrayUtils.java | 2 +- .../org/apache/hadoop/hbase/util/AvlUtil.java | 2 +- .../hbase/util/BoundedCompletionService.java | 4 +- .../hadoop/hbase/util/ByteRangeUtils.java | 4 +- .../hadoop/hbase/util/CollectionUtils.java | 3 +- .../hadoop/hbase/util/ConcatenatedLists.java | 2 +- .../hbase/util/CoprocessorClassLoader.java | 4 +- .../org/apache/hadoop/hbase/util/Counter.java | 2 +- .../hadoop/hbase/util/DynamicClassLoader.java | 2 +- .../hadoop/hbase/util/IterableUtils.java | 2 +- .../apache/hadoop/hbase/util/KeyLocker.java | 4 +- .../org/apache/hadoop/hbase/util/Pair.java | 2 +- .../org/apache/hadoop/hbase/util/Threads.java | 2 +- .../org/apache/hadoop/hbase/util/Triple.java | 2 +- .../hadoop/hbase/util/WeakObjectPool.java | 5 +- .../hbase/util/test/RedundantKVGenerator.java | 16 +-- .../org/apache/hadoop/hbase/ClassFinder.java | 10 +- .../apache/hadoop/hbase/ResourceChecker.java | 2 +- .../hbase/ResourceCheckerJUnitListener.java | 6 +- .../org/apache/hadoop/hbase/TestCellUtil.java | 8 +- .../apache/hadoop/hbase/TestClassFinder.java | 2 +- 
.../hbase/TestCompoundConfiguration.java | 9 +- .../org/apache/hadoop/hbase/TestKeyValue.java | 4 +- .../hbase/io/TestBoundedByteBufferPool.java | 2 +- .../hbase/io/TestTagCompressionContext.java | 4 +- .../hadoop/hbase/nio/TestMultiByteBuff.java | 2 +- .../hbase/types/TestFixedLengthWrapper.java | 8 +- .../hbase/types/TestStructNullExtension.java | 2 +- .../hbase/types/TestTerminatedWrapper.java | 14 +-- .../hbase/util/ClassLoaderTestHelper.java | 4 +- .../apache/hadoop/hbase/util/TestAvlUtil.java | 6 +- .../apache/hadoop/hbase/util/TestBase64.java | 2 +- .../hbase/util/TestBoundedArrayQueue.java | 2 +- .../hbase/util/TestByteBufferUtils.java | 2 +- .../TestByteRangeWithKVSerialization.java | 2 +- .../apache/hadoop/hbase/util/TestBytes.java | 4 +- .../hbase/util/TestConcatenatedLists.java | 14 +-- .../hadoop/hbase/util/TestKeyLocker.java | 2 +- .../hbase/util/TestLoadTestKVGenerator.java | 2 +- .../hadoop/hbase/util/TestWeakObjectPool.java | 4 +- .../client/coprocessor/AggregationClient.java | 36 +++--- .../coprocessor/AggregateImplementation.java | 14 +-- .../ColumnAggregationEndpoint.java | 2 +- ...ColumnAggregationEndpointNullResponse.java | 2 +- .../ColumnAggregationEndpointWithErrors.java | 2 +- .../hbase/coprocessor/TestClassLoading.java | 10 +- .../coprocessor/TestCoprocessorEndpoint.java | 8 +- .../TestCoprocessorTableEndpoint.java | 2 +- .../TestRegionServerCoprocessorEndpoint.java | 6 +- .../coprocessor/TestRowProcessorEndpoint.java | 19 ++-- .../SecureBulkLoadEndpointClient.java | 9 +- ...onServerBulkLoadWithOldSecureEndpoint.java | 3 +- .../TestServerCustomProtocol.java | 14 +-- ...plicationSyncUpToolWithBulkLoadedData.java | 2 +- .../example/BulkDeleteEndpoint.java | 10 +- .../coprocessor/example/RowCountEndpoint.java | 4 +- .../hadoop/hbase/mapreduce/IndexBuilder.java | 2 +- .../hadoop/hbase/thrift/DemoClient.java | 24 ++-- .../hadoop/hbase/thrift/HttpDoAsClient.java | 6 +- .../hadoop/hbase/thrift2/DemoClient.java | 6 +- .../hbase/io/hfile/MemcachedBlockCache.java | 2 +- .../hbase/CompatibilitySingletonFactory.java | 2 +- .../TestCompatibilitySingletonFactory.java | 5 +- .../MetricsHBaseServerSourceFactoryImpl.java | 5 +- .../MetricsStochasticBalancerSourceImpl.java | 4 +- .../apache/hadoop/hbase/metrics/Interns.java | 4 +- .../metrics2/util/MetricSampleQuantiles.java | 4 +- .../hbase/test/MetricsAssertHelperImpl.java | 6 +- .../hadoop/hbase/DistributedHBaseCluster.java | 12 +- .../hadoop/hbase/HBaseClusterManager.java | 2 +- .../IntegrationTestDDLMasterFailover.java | 18 ++- .../hadoop/hbase/IntegrationTestIngest.java | 4 +- .../hbase/IntegrationTestIngestWithACL.java | 2 +- .../hbase/IntegrationTestIngestWithMOB.java | 4 +- .../hbase/IntegrationTestIngestWithTags.java | 2 +- ...grationTestIngestWithVisibilityLabels.java | 18 +-- .../hbase/IntegrationTestLazyCfLoading.java | 3 +- .../IntegrationTestRegionReplicaPerf.java | 4 +- ...tegrationTestRegionReplicaReplication.java | 4 +- .../hadoop/hbase/RESTApiClusterManager.java | 2 +- .../hadoop/hbase/chaos/actions/Action.java | 6 +- .../chaos/actions/BatchRestartRsAction.java | 2 +- .../actions/RestartRandomDataNodeAction.java | 2 +- .../actions/RollingBatchRestartRsAction.java | 6 +- .../UnbalanceKillAndRebalanceAction.java | 6 +- .../chaos/actions/UnbalanceRegionsAction.java | 4 +- .../chaos/monkies/PolicyBasedChaosMonkey.java | 2 +- .../chaos/policies/DoActionsOncePolicy.java | 2 +- .../policies/PeriodicRandomActionPolicy.java | 4 +- .../mapreduce/IntegrationTestBulkLoad.java | 4 +- 
.../mapreduce/IntegrationTestImportTsv.java | 2 +- .../hbase/mttr/IntegrationTestMTTR.java | 8 +- .../test/IntegrationTestBigLinkedList.java | 8 +- .../test/IntegrationTestLoadAndVerify.java | 2 +- .../test/IntegrationTestReplication.java | 2 +- .../IntegrationTestSendTraceRequests.java | 2 +- .../prefixtree/decode/ArraySearcherPool.java | 3 +- .../prefixtree/decode/PrefixTreeCell.java | 2 +- .../prefixtree/encode/EncoderPoolImpl.java | 3 +- .../prefixtree/encode/other/LongEncoder.java | 2 +- .../prefixtree/encode/tokenize/Tokenizer.java | 2 +- .../util/byterange/impl/ByteRangeHashSet.java | 2 +- .../util/byterange/impl/ByteRangeTreeSet.java | 2 +- .../row/data/TestRowDataExerciseFInts.java | 2 +- .../row/data/TestRowDataTrivialWithTags.java | 2 +- .../prefixtree/row/data/TestRowDataUrls.java | 2 +- .../data/TestTimestampDataBasic.java | 4 +- .../data/TestTimestampDataNumbers.java | 4 +- .../data/TestTimestampDataRepeats.java | 4 +- .../hbase/procedure2/ProcedureExecutor.java | 24 ++-- .../hbase/procedure2/RootProcedureState.java | 6 +- .../procedure2/StateMachineProcedure.java | 2 +- .../procedure2/store/ProcedureStoreBase.java | 3 +- .../store/wal/ProcedureWALPrettyPrinter.java | 2 +- .../store/wal/WALProcedureStore.java | 8 +- .../procedure2/ProcedureTestingUtility.java | 8 +- .../procedure2/TestProcedureExecution.java | 10 +- .../procedure2/TestProcedureReplayOrder.java | 2 +- .../TestProcedureSchedulerConcurrency.java | 3 +- .../procedure2/TestProcedureSuspended.java | 2 +- .../hbase/procedure2/TestYieldProcedures.java | 2 +- .../procedure2/util/TestDelayedUtil.java | 10 +- .../hbase/util/ForeignExceptionUtil.java | 2 +- .../apache/hadoop/hbase/rest/RESTServer.java | 5 +- .../apache/hadoop/hbase/rest/RowResource.java | 2 +- .../org/apache/hadoop/hbase/rest/RowSpec.java | 5 +- .../hadoop/hbase/rest/client/Client.java | 2 +- .../hbase/rest/client/RemoteHTable.java | 9 +- .../hadoop/hbase/rest/filter/GzipFilter.java | 2 +- .../rest/filter/RestCsrfPreventionFilter.java | 4 +- .../hadoop/hbase/rest/model/CellSetModel.java | 2 +- .../hbase/rest/model/ColumnSchemaModel.java | 2 +- .../rest/model/NamespacesInstanceModel.java | 6 +- .../hbase/rest/model/NamespacesModel.java | 4 +- .../hadoop/hbase/rest/model/RowModel.java | 4 +- .../hadoop/hbase/rest/model/ScannerModel.java | 16 +-- .../rest/model/StorageClusterStatusModel.java | 6 +- .../hbase/rest/model/TableInfoModel.java | 2 +- .../hbase/rest/model/TableListModel.java | 2 +- .../hbase/rest/model/TableSchemaModel.java | 4 +- .../hbase/rest/PerformanceEvaluation.java | 10 +- .../hbase/rest/TestGetAndPutResource.java | 6 +- .../hbase/rest/TestMultiRowResource.java | 2 +- .../rest/TestNamespacesInstanceResource.java | 14 +-- .../hbase/rest/TestScannersWithFilters.java | 2 +- .../hadoop/hbase/rest/TestSchemaResource.java | 2 +- .../hbase/rest/client/TestRemoteTable.java | 14 +-- .../model/TestNamespacesInstanceModel.java | 2 +- .../hbase/rsgroup/RSGroupAdminClient.java | 2 +- .../hbase/rsgroup/RSGroupAdminServer.java | 6 +- .../rsgroup/RSGroupBasedLoadBalancer.java | 25 ++--- .../TestRSGroupBasedLoadBalancer.java | 20 ++-- .../hbase/rsgroup/TestRSGroupsBase.java | 2 +- .../hadoop/hbase/HDFSBlocksDistribution.java | 8 +- .../apache/hadoop/hbase/HealthChecker.java | 2 +- .../org/apache/hadoop/hbase/JMXListener.java | 2 +- .../hadoop/hbase/LocalHBaseCluster.java | 9 +- .../SslRMIClientSocketFactorySecure.java | 2 +- .../SslRMIServerSocketFactorySecure.java | 2 +- .../hadoop/hbase/ZKNamespaceManager.java | 2 +- 
.../hadoop/hbase/backup/HFileArchiver.java | 6 +- .../example/HFileArchiveTableMonitor.java | 2 +- .../hbase/client/ClientSideRegionScanner.java | 2 +- .../hadoop/hbase/client/HTableWrapper.java | 2 +- .../hbase/client/TableSnapshotScanner.java | 2 +- .../hbase/constraint/ConstraintProcessor.java | 2 +- .../hadoop/hbase/constraint/Constraints.java | 6 +- .../ZKSplitLogManagerCoordination.java | 2 +- .../ZkSplitLogWorkerCoordination.java | 2 +- .../hbase/coprocessor/CoprocessorHost.java | 13 +-- .../coprocessor/MultiRowMutationEndpoint.java | 4 +- .../hbase/coprocessor/ObserverContext.java | 4 +- .../hbase/errorhandling/ForeignException.java | 3 +- .../ForeignExceptionDispatcher.java | 3 +- .../hbase/executor/ExecutorService.java | 5 +- .../favored/FavoredNodeAssignmentHelper.java | 37 +++---- .../favored/FavoredNodeLoadBalancer.java | 28 ++--- .../hbase/favored/FavoredNodesPlan.java | 2 +- .../hadoop/hbase/http/HttpRequestLog.java | 2 +- .../apache/hadoop/hbase/http/HttpServer.java | 11 +- .../hbase/http/lib/StaticUserWebFilter.java | 2 +- .../org/apache/hadoop/hbase/io/FileLink.java | 2 +- .../org/apache/hadoop/hbase/io/HFileLink.java | 2 +- .../asyncfs/FanOutOneBlockAsyncDFSOutput.java | 2 +- .../FanOutOneBlockAsyncDFSOutputHelper.java | 3 +- .../hadoop/hbase/io/hfile/BlockCacheUtil.java | 5 +- .../hfile/CacheableDeserializerIdManager.java | 3 +- .../io/hfile/CompoundBloomFilterWriter.java | 2 +- .../apache/hadoop/hbase/io/hfile/HFile.java | 4 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 3 +- .../hbase/io/hfile/HFileBlockIndex.java | 15 ++- .../hbase/io/hfile/HFilePrettyPrinter.java | 10 +- .../hbase/io/hfile/HFileReaderImpl.java | 10 +- .../hbase/io/hfile/HFileWriterImpl.java | 8 +- .../hbase/io/hfile/PrefetchExecutor.java | 3 +- .../hbase/io/hfile/bucket/BucketCache.java | 17 ++- .../hbase/io/hfile/bucket/UniqueIndexMap.java | 4 +- .../hadoop/hbase/io/util/MemorySizeUtil.java | 4 +- .../hadoop/hbase/ipc/FifoRpcScheduler.java | 2 +- .../apache/hadoop/hbase/ipc/RpcServer.java | 18 ++- .../hadoop/hbase/ipc/SimpleRpcServer.java | 7 +- .../hadoop/hbase/mapred/GroupingTableMap.java | 2 +- .../hadoop/hbase/mapreduce/CopyTable.java | 2 +- .../DefaultVisibilityExpressionResolver.java | 2 +- .../hbase/mapreduce/GroupingTableMapper.java | 2 +- .../hbase/mapreduce/HFileOutputFormat2.java | 24 ++-- .../hadoop/hbase/mapreduce/HashTable.java | 6 +- .../apache/hadoop/hbase/mapreduce/Import.java | 4 +- .../hadoop/hbase/mapreduce/ImportTsv.java | 10 +- .../hbase/mapreduce/KeyValueSortReducer.java | 2 +- .../mapreduce/LoadIncrementalHFiles.java | 6 +- .../mapreduce/MultiHFileOutputFormat.java | 5 +- .../mapreduce/MultiTableInputFormat.java | 2 +- .../mapreduce/MultiTableInputFormatBase.java | 6 +- .../hbase/mapreduce/PutSortReducer.java | 4 +- .../hbase/mapreduce/ResultSerialization.java | 2 +- .../hbase/mapreduce/TableInputFormatBase.java | 9 +- .../hbase/mapreduce/TableMapReduceUtil.java | 8 +- .../mapreduce/TableSnapshotInputFormat.java | 2 +- .../TableSnapshotInputFormatImpl.java | 4 +- .../hbase/mapreduce/TextSortReducer.java | 4 +- .../hbase/mapreduce/TsvImporterMapper.java | 2 +- .../hbase/mapreduce/WALInputFormat.java | 4 +- .../hadoop/hbase/mapreduce/WALPlayer.java | 2 +- .../hbase/master/AssignmentManager.java | 56 +++++----- .../master/AssignmentVerificationReport.java | 48 +++----- .../hadoop/hbase/master/BulkReOpen.java | 2 +- .../hadoop/hbase/master/CatalogJanitor.java | 20 ++-- .../master/ClusterSchemaServiceImpl.java | 2 +- .../hbase/master/ClusterStatusPublisher.java | 7 +- 
.../hadoop/hbase/master/DeadServer.java | 8 +- .../hbase/master/GeneralBulkAssigner.java | 7 +- .../apache/hadoop/hbase/master/HMaster.java | 19 ++-- .../hbase/master/MasterMetaBootstrap.java | 4 +- .../master/MasterMobCompactionThread.java | 2 +- .../hbase/master/MasterRpcServices.java | 6 +- .../hadoop/hbase/master/MasterWalManager.java | 8 +- .../hadoop/hbase/master/RackManager.java | 2 +- .../master/RegionPlacementMaintainer.java | 35 +++--- .../hadoop/hbase/master/RegionStates.java | 84 ++++++-------- .../hadoop/hbase/master/ServerManager.java | 33 +++--- .../SnapshotOfRegionAssignmentFromMeta.java | 26 ++--- .../hadoop/hbase/master/SplitLogManager.java | 23 ++-- .../master/balancer/BaseLoadBalancer.java | 63 ++++++----- .../master/balancer/ClusterLoadState.java | 2 +- .../master/balancer/RegionLocationFinder.java | 11 +- .../master/balancer/SimpleLoadBalancer.java | 12 +- .../balancer/StochasticLoadBalancer.java | 10 +- .../hbase/master/cleaner/CleanerChore.java | 2 +- .../cleaner/ReplicationMetaCleaner.java | 2 +- .../cleaner/ReplicationZKNodeCleaner.java | 2 +- .../hbase/master/locking/LockProcedure.java | 2 +- .../normalizer/SimpleRegionNormalizer.java | 2 +- .../procedure/CloneSnapshotProcedure.java | 9 +- .../procedure/CreateTableProcedure.java | 5 +- .../procedure/DeleteTableProcedure.java | 4 +- .../procedure/EnableTableProcedure.java | 5 +- .../procedure/MasterDDLOperationHelper.java | 4 +- .../procedure/MergeTableRegionsProcedure.java | 2 +- .../procedure/ModifyTableProcedure.java | 2 +- .../procedure/RestoreSnapshotProcedure.java | 13 +-- .../procedure/ServerCrashProcedure.java | 8 +- .../procedure/SplitTableRegionProcedure.java | 10 +- .../procedure/TruncateTableProcedure.java | 4 +- .../DisabledTableSnapshotHandler.java | 2 +- .../snapshot/EnabledTableSnapshotHandler.java | 2 +- .../master/snapshot/SnapshotFileCache.java | 7 +- .../master/snapshot/SnapshotManager.java | 13 +-- .../master/snapshot/TakeSnapshotHandler.java | 2 +- .../hbase/mob/DefaultMobStoreCompactor.java | 2 +- .../hbase/mob/DefaultMobStoreFlusher.java | 4 +- .../org/apache/hadoop/hbase/mob/MobFile.java | 4 +- .../apache/hadoop/hbase/mob/MobFileCache.java | 6 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 6 +- .../PartitionedMobCompactionRequest.java | 2 +- .../monitoring/MonitoredRPCHandlerImpl.java | 2 +- .../hbase/monitoring/MonitoredTaskImpl.java | 2 +- .../hadoop/hbase/monitoring/TaskMonitor.java | 6 +- .../namespace/NamespaceStateManager.java | 2 +- .../NamespaceTableAndRegionInfo.java | 2 +- .../procedure/MasterProcedureManagerHost.java | 3 +- .../hadoop/hbase/procedure/Procedure.java | 6 +- .../hbase/procedure/ProcedureCoordinator.java | 4 +- .../hbase/procedure/ProcedureManagerHost.java | 6 +- .../hbase/procedure/ProcedureMember.java | 2 +- .../MasterFlushTableProcedureManager.java | 4 +- ...egionServerFlushTableProcedureManager.java | 6 +- .../protobuf/ReplicationProtbufUtil.java | 4 +- .../hbase/quotas/MasterQuotaManager.java | 8 +- .../hadoop/hbase/quotas/QuotaCache.java | 13 +-- .../apache/hadoop/hbase/quotas/QuotaUtil.java | 4 +- .../hadoop/hbase/quotas/UserQuotaState.java | 4 +- .../regionserver/AbstractMultiFileWriter.java | 4 +- .../AnnotationReadingPriorityFunction.java | 12 +- .../hbase/regionserver/BaseRowProcessor.java | 2 +- .../hadoop/hbase/regionserver/CellSet.java | 2 +- .../regionserver/CompactSplitThread.java | 4 +- .../regionserver/CompactingMemStore.java | 2 +- .../hbase/regionserver/CompactionTool.java | 6 +- .../CompositeImmutableSegment.java | 4 +- 
.../DateTieredMultiFileWriter.java | 3 +- .../hbase/regionserver/DefaultMemStore.java | 4 +- .../regionserver/DefaultStoreFileManager.java | 16 +-- .../regionserver/DefaultStoreFlusher.java | 2 +- .../FlushAllLargeStoresPolicy.java | 2 +- .../FlushNonSloppyStoresFirstPolicy.java | 2 +- .../hadoop/hbase/regionserver/HMobStore.java | 6 +- .../hadoop/hbase/regionserver/HRegion.java | 101 ++++++++--------- .../hbase/regionserver/HRegionFileSystem.java | 4 +- .../hbase/regionserver/HRegionServer.java | 26 ++--- .../hadoop/hbase/regionserver/HStore.java | 54 +++++---- .../hbase/regionserver/HeapMemoryManager.java | 2 +- .../hbase/regionserver/ImmutableSegment.java | 2 +- .../hbase/regionserver/KeyValueHeap.java | 6 +- .../hadoop/hbase/regionserver/Leases.java | 2 +- .../hadoop/hbase/regionserver/LogRoller.java | 3 +- .../hadoop/hbase/regionserver/LruHashMap.java | 6 +- .../MemStoreCompactorSegmentsIterator.java | 2 +- .../hbase/regionserver/MemStoreFlusher.java | 10 +- .../hbase/regionserver/MemStoreLABImpl.java | 2 +- .../MemStoreSegmentsIterator.java | 4 +- .../MultiRowMutationProcessor.java | 2 +- .../MultiVersionConcurrencyControl.java | 2 +- .../hbase/regionserver/RSRpcServices.java | 34 +++--- .../regionserver/RegionCoprocessorHost.java | 4 +- .../regionserver/RegionServerAccounting.java | 2 +- .../regionserver/RegionServicesForStores.java | 2 +- .../regionserver/SecureBulkLoadManager.java | 12 +- .../hadoop/hbase/regionserver/Segment.java | 4 +- .../hbase/regionserver/SegmentFactory.java | 2 +- .../regionserver/ServerNonceManager.java | 3 +- .../hbase/regionserver/ShutdownHook.java | 2 +- .../hbase/regionserver/StoreFileScanner.java | 2 +- .../hbase/regionserver/StoreFlusher.java | 2 +- .../hbase/regionserver/StoreScanner.java | 14 +-- .../regionserver/StorefileRefresherChore.java | 2 +- .../regionserver/StripeMultiFileWriter.java | 6 +- .../hbase/regionserver/StripeStoreEngine.java | 2 +- .../regionserver/StripeStoreFileManager.java | 55 +++++----- .../regionserver/StripeStoreFlusher.java | 2 +- .../compactions/CompactionRequest.java | 2 +- .../regionserver/compactions/Compactor.java | 6 +- .../DateTieredCompactionPolicy.java | 8 +- .../ExploringCompactionPolicy.java | 10 +- .../compactions/FIFOCompactionPolicy.java | 2 +- .../compactions/SortedCompactionPolicy.java | 4 +- .../compactions/StripeCompactionPolicy.java | 10 +- .../querymatcher/ScanDeleteTracker.java | 3 + .../snapshot/RegionServerSnapshotManager.java | 6 +- .../PressureAwareThroughputController.java | 3 +- .../hbase/regionserver/wal/AbstractFSWAL.java | 4 +- .../hbase/regionserver/wal/AsyncFSWAL.java | 2 +- .../wal/AsyncProtobufLogWriter.java | 2 +- .../hadoop/hbase/regionserver/wal/FSHLog.java | 4 +- .../regionserver/wal/ProtobufLogReader.java | 2 +- .../wal/SecureProtobufLogReader.java | 2 +- .../wal/SequenceIdAccounting.java | 4 +- .../hbase/regionserver/wal/WALEdit.java | 4 +- .../regionserver/wal/WALEditsReplaySink.java | 4 +- .../hbase/replication/BulkLoadCellFilter.java | 2 +- .../replication/ChainWALEntryFilter.java | 2 +- .../replication/HBaseReplicationEndpoint.java | 4 +- .../regionserver/DumpReplicationQueues.java | 6 +- .../HBaseInterClusterReplicationEndpoint.java | 8 +- .../regionserver/HFileReplicator.java | 8 +- .../regionserver/MetricsSource.java | 2 +- .../RegionReplicaReplicationEndpoint.java | 8 +- .../replication/regionserver/Replication.java | 2 +- .../regionserver/ReplicationLoad.java | 6 +- .../regionserver/ReplicationSink.java | 23 ++-- .../regionserver/ReplicationSource.java | 8 +- 
.../ReplicationSourceManager.java | 26 ++--- .../security/access/AccessControlLists.java | 18 ++- .../security/access/AccessController.java | 14 +-- .../security/access/TableAuthManager.java | 13 +-- .../security/access/ZKPermissionWatcher.java | 3 +- .../AuthenticationTokenSecretManager.java | 6 +- .../hbase/security/token/TokenUtil.java | 2 +- .../DefaultVisibilityLabelServiceImpl.java | 40 +++---- .../DefinedSetFilterScanLabelGenerator.java | 8 +- .../EnforcingScanLabelGenerator.java | 4 +- .../security/visibility/ExpressionParser.java | 4 +- .../FeedUserAuthScanLabelGenerator.java | 4 +- .../visibility/VisibilityController.java | 20 ++-- .../visibility/VisibilityLabelsCache.java | 19 ++-- .../VisibilityReplicationEndpoint.java | 6 +- .../VisibilityScanDeleteTracker.java | 45 ++++---- .../security/visibility/VisibilityUtils.java | 8 +- .../expression/NonLeafExpressionNode.java | 4 +- .../hadoop/hbase/snapshot/ExportSnapshot.java | 15 ++- .../hbase/snapshot/RestoreSnapshotHelper.java | 34 +++--- .../hadoop/hbase/snapshot/SnapshotInfo.java | 3 +- .../hbase/snapshot/SnapshotManifest.java | 10 +- .../hbase/snapshot/SnapshotManifestV1.java | 5 +- .../hbase/snapshot/SnapshotManifestV2.java | 5 +- .../hbase/snapshot/SnapshotReferenceUtil.java | 5 +- .../org/apache/hadoop/hbase/tool/Canary.java | 30 ++--- .../util/BoundedPriorityBlockingQueue.java | 2 +- .../hbase/util/CollectionBackedScanner.java | 2 +- .../hadoop/hbase/util/ConnectionCache.java | 5 +- .../hadoop/hbase/util/EncryptionTest.java | 7 +- .../apache/hadoop/hbase/util/FSHDFSUtils.java | 2 +- .../hadoop/hbase/util/FSRegionScanner.java | 4 +- .../hadoop/hbase/util/FSTableDescriptors.java | 9 +- .../org/apache/hadoop/hbase/util/FSUtils.java | 35 +++--- .../apache/hadoop/hbase/util/HBaseFsck.java | 77 ++++++------- .../org/apache/hadoop/hbase/util/IdLock.java | 3 +- .../hadoop/hbase/util/IdReadWriteLock.java | 3 +- .../apache/hadoop/hbase/util/JvmVersion.java | 2 +- .../hadoop/hbase/util/ModifyRegionUtils.java | 8 +- .../hadoop/hbase/util/MunkresAssignment.java | 13 +-- .../apache/hadoop/hbase/util/RegionMover.java | 15 ++- .../hbase/util/RegionSizeCalculator.java | 2 +- .../hbase/util/RegionSplitCalculator.java | 8 +- .../hadoop/hbase/util/RegionSplitter.java | 2 +- .../hadoop/hbase/util/ServerCommandLine.java | 2 +- .../hbase/util/SortedCopyOnWriteSet.java | 20 ++-- .../apache/hadoop/hbase/util/SortedList.java | 18 +-- .../util/hbck/HFileCorruptionChecker.java | 34 +++--- .../hbase/wal/AbstractFSWALProvider.java | 2 +- .../hbase/wal/BoundedGroupingStrategy.java | 3 +- .../hadoop/hbase/wal/DisabledWALProvider.java | 5 +- .../hbase/wal/RegionGroupingProvider.java | 2 +- .../apache/hadoop/hbase/wal/WALFactory.java | 4 +- .../org/apache/hadoop/hbase/wal/WALKey.java | 10 +- .../hadoop/hbase/wal/WALPrettyPrinter.java | 8 +- .../apache/hadoop/hbase/wal/WALSplitter.java | 52 ++++----- .../zookeeper/DrainingServerTracker.java | 2 +- .../hbase/zookeeper/MiniZooKeeperCluster.java | 6 +- .../hbase/zookeeper/RegionServerTracker.java | 5 +- .../hadoop/hbase/zookeeper/ZKServerTool.java | 2 +- .../resources/hbase-webapps/master/table.jsp | 10 +- .../hadoop/hbase/HBaseTestingUtility.java | 14 +-- .../org/apache/hadoop/hbase/HTestConst.java | 2 +- .../apache/hadoop/hbase/MetaMockingUtil.java | 2 +- .../apache/hadoop/hbase/MiniHBaseCluster.java | 4 +- .../hbase/MockRegionServerServices.java | 4 +- .../hadoop/hbase/MultithreadedTestUtil.java | 2 +- .../hadoop/hbase/PerformanceEvaluation.java | 10 +- .../hbase/PerformanceEvaluationCommons.java | 2 +- 
.../hadoop/hbase/TestCheckTestClasses.java | 2 +- .../hadoop/hbase/TestGlobalMemStoreSize.java | 2 +- .../hbase/TestHDFSBlocksDistribution.java | 2 +- .../hbase/TestMetaTableAccessorNoCluster.java | 6 +- .../TestPartialResultsFromClientSide.java | 2 +- .../hadoop/hbase/TestRegionRebalancing.java | 2 +- ...stServerSideScanMetricsFromClientSide.java | 2 +- .../TestStochasticBalancerJmxMetrics.java | 4 +- .../hbase/backup/TestHFileArchiving.java | 6 +- .../TestZooKeeperTableArchiveClient.java | 10 +- .../hadoop/hbase/client/TestAdmin1.java | 6 +- .../hadoop/hbase/client/TestAdmin2.java | 2 +- ...tAvoidCellReferencesIntoShippedBlocks.java | 6 +- .../client/TestBlockEvictionFromClient.java | 4 +- .../client/TestClientOperationInterrupt.java | 2 +- .../hbase/client/TestConnectionUtils.java | 4 +- .../hadoop/hbase/client/TestFastFail.java | 2 +- .../hbase/client/TestFromClientSide.java | 10 +- .../hbase/client/TestFromClientSide3.java | 8 +- .../hbase/client/TestHBaseAdminNoCluster.java | 2 +- .../apache/hadoop/hbase/client/TestHCM.java | 4 +- .../hbase/client/TestHTableMultiplexer.java | 2 +- .../client/TestIllegalTableDescriptor.java | 2 +- .../client/TestIncrementsFromClientSide.java | 2 +- .../hbase/client/TestIntraRowPagination.java | 6 +- .../hbase/client/TestMultiParallel.java | 24 ++-- .../hbase/client/TestReplicaWithCluster.java | 4 +- .../hbase/client/TestReplicasClient.java | 8 +- .../client/TestRestoreSnapshotFromClient.java | 2 +- .../client/TestScannersFromClientSide.java | 38 +++---- .../hbase/client/TestSnapshotFromClient.java | 4 +- .../hbase/client/TestSnapshotMetadata.java | 6 +- .../hbase/client/TestTimestampsFilter.java | 4 +- .../replication/TestReplicationAdmin.java | 26 ++--- .../TestReplicationAdminWithClusters.java | 3 +- .../hbase/constraint/TestConstraints.java | 10 +- .../coprocessor/TestCoprocessorInterface.java | 2 +- .../hbase/coprocessor/TestHTableWrapper.java | 2 +- .../TestOpenTableInCoprocessor.java | 2 +- .../coprocessor/TestRegionObserverBypass.java | 2 +- .../TestRegionObserverInterface.java | 2 +- .../hbase/coprocessor/TestWALObserver.java | 9 +- .../TestFavoredNodeAssignmentHelper.java | 45 ++++---- .../hbase/filter/TestColumnPrefixFilter.java | 26 ++--- .../hbase/filter/TestColumnRangeFilter.java | 18 +-- .../filter/TestDependentColumnFilter.java | 6 +- .../hadoop/hbase/filter/TestFilter.java | 40 +++---- .../hadoop/hbase/filter/TestFilterList.java | 8 +- .../TestFilterListOrOperatorWithBlkCnt.java | 8 +- .../hbase/filter/TestFilterSerialization.java | 18 +-- .../filter/TestFilterWithScanLimits.java | 2 +- .../hbase/filter/TestFilterWrapper.java | 4 +- ...FirstKeyValueMatchingQualifiersFilter.java | 2 +- .../TestFuzzyRowAndColumnRangeFilter.java | 4 +- .../filter/TestFuzzyRowFilterEndToEnd.java | 24 ++-- .../filter/TestInvocationRecordFilter.java | 12 +- .../hbase/filter/TestMultiRowRangeFilter.java | 60 +++++----- .../TestMultipleColumnPrefixFilter.java | 34 +++--- .../TestSingleColumnValueExcludeFilter.java | 2 +- .../hadoop/hbase/http/TestGlobalFilter.java | 2 +- .../hadoop/hbase/http/TestHttpServer.java | 6 +- .../hadoop/hbase/http/TestPathFilter.java | 2 +- .../hbase/http/resource/JerseyResource.java | 2 +- .../hbase/http/ssl/KeyStoreTestUtil.java | 2 +- .../apache/hadoop/hbase/io/TestFileLink.java | 4 +- .../hbase/io/TestHalfStoreFileReader.java | 2 +- .../TestBufferedDataBlockEncoder.java | 2 +- .../io/encoding/TestChangingEncoding.java | 3 +- .../io/encoding/TestDataBlockEncoders.java | 9 +- .../hbase/io/encoding/TestEncodedSeekers.java | 2 +- 
.../hbase/io/encoding/TestPrefixTree.java | 4 +- .../io/encoding/TestPrefixTreeEncoding.java | 7 +- .../TestSeekBeforeWithReverseScan.java | 4 +- .../encoding/TestSeekToBlockWithEncoders.java | 20 ++-- .../hadoop/hbase/io/hfile/CacheTestUtils.java | 4 +- .../hbase/io/hfile/RandomDistribution.java | 4 +- .../hbase/io/hfile/TestCacheOnWrite.java | 13 +-- .../hbase/io/hfile/TestFixedFileTrailer.java | 2 +- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 24 ++-- .../hbase/io/hfile/TestHFileBlockIndex.java | 8 +- .../io/hfile/TestHFileDataBlockEncoder.java | 3 +- .../TestHFileInlineToRootChunkConversion.java | 2 +- .../hbase/io/hfile/TestHFileWriterV3.java | 4 +- .../hfile/TestLazyDataBlockDecompression.java | 2 +- .../hadoop/hbase/io/hfile/TestReseekTo.java | 4 +- .../io/hfile/TestScannerFromBucketCache.java | 6 +- .../TestScannerSelectionUsingKeyRange.java | 6 +- .../hfile/TestScannerSelectionUsingTTL.java | 4 +- .../hadoop/hbase/io/hfile/TestSeekTo.java | 2 +- .../io/hfile/bucket/TestBucketCache.java | 4 +- .../hfile/bucket/TestBucketWriterThread.java | 2 +- .../hbase/ipc/TestSimpleRpcScheduler.java | 4 +- .../hbase/mapred/TestTableMapReduceUtil.java | 2 +- .../MultiTableInputFormatTestBase.java | 2 +- .../hbase/mapreduce/NMapInputFormat.java | 5 +- .../mapreduce/TestGroupingTableMapper.java | 2 +- .../mapreduce/TestHFileOutputFormat2.java | 14 +-- .../mapreduce/TestHRegionPartitioner.java | 4 +- .../hadoop/hbase/mapreduce/TestHashTable.java | 3 +- .../hbase/mapreduce/TestImportExport.java | 2 +- .../TestImportTSVWithOperationAttributes.java | 2 +- .../mapreduce/TestImportTSVWithTTLs.java | 2 +- .../TestImportTSVWithVisibilityLabels.java | 6 +- .../hadoop/hbase/mapreduce/TestImportTsv.java | 6 +- .../hbase/mapreduce/TestImportTsvParser.java | 2 +- .../mapreduce/TestLoadIncrementalHFiles.java | 4 +- .../mapreduce/TestMultiHFileOutputFormat.java | 4 +- .../hbase/mapreduce/TestRowCounter.java | 2 +- .../TestSimpleTotalOrderPartitioner.java | 3 +- .../hbase/mapreduce/TestTableSplit.java | 4 +- .../hbase/mapreduce/TestTimeRangeMapRed.java | 7 +- .../hadoop/hbase/mapreduce/TestWALPlayer.java | 2 +- .../hbase/mapreduce/TestWALRecordReader.java | 3 +- .../hadoop/hbase/master/MockRegionServer.java | 13 +-- .../hbase/master/TestAssignmentListener.java | 6 +- .../TestAssignmentManagerOnCluster.java | 8 +- .../hbase/master/TestCatalogJanitor.java | 11 +- .../master/TestClusterStatusPublisher.java | 10 +- .../master/TestDistributedLogSplitting.java | 14 +-- .../hbase/master/TestMasterNoCluster.java | 2 +- ...TestMasterOperationsForRegionReplicas.java | 8 +- .../hbase/master/TestMasterStatusServlet.java | 7 +- .../hbase/master/TestMasterWalManager.java | 2 +- .../hbase/master/TestRegionPlacement.java | 11 +- .../hbase/master/TestRegionPlacement2.java | 12 +- .../hbase/master/TestRollingRestart.java | 4 +- .../master/balancer/BalancerTestBase.java | 38 +++---- .../master/balancer/TestBaseLoadBalancer.java | 44 ++++---- .../balancer/TestDefaultLoadBalancer.java | 10 +- .../balancer/TestRegionLocationFinder.java | 4 +- .../balancer/TestStochasticLoadBalancer.java | 13 +-- .../hbase/master/cleaner/TestLogsCleaner.java | 2 +- .../cleaner/TestReplicationHFileCleaner.java | 6 +- .../MasterProcedureTestingUtility.java | 2 +- ...stMasterProcedureSchedulerConcurrency.java | 4 +- .../snapshot/TestSnapshotFileCache.java | 4 +- .../snapshot/TestSnapshotHFileCleaner.java | 2 +- .../mob/compactions/TestMobCompactor.java | 4 +- .../TestPartitionedMobCompactor.java | 6 +- .../SimpleMasterProcedureManager.java | 2 +- 
.../procedure/SimpleRSProcedureManager.java | 6 +- .../hadoop/hbase/procedure/TestProcedure.java | 8 +- .../hbase/procedure/TestProcedureManager.java | 2 +- .../hbase/procedure/TestZKProcedure.java | 14 +-- .../procedure/TestZKProcedureControllers.java | 14 +-- .../protobuf/TestReplicationProtobuf.java | 8 +- ...bstractTestDateTieredCompactionPolicy.java | 2 +- .../regionserver/DataBlockEncodingTool.java | 13 +-- .../EncodedSeekPerformanceTest.java | 4 +- .../regionserver/KeyValueScanFixture.java | 2 +- .../hbase/regionserver/MockStoreFile.java | 2 +- .../hbase/regionserver/OOMERegionServer.java | 2 +- .../hbase/regionserver/RegionAsTable.java | 4 +- .../regionserver/TestAtomicOperation.java | 6 +- .../hbase/regionserver/TestBlocksRead.java | 4 +- .../hbase/regionserver/TestBlocksScanned.java | 2 +- .../hbase/regionserver/TestBulkLoad.java | 13 +-- .../TestCacheOnWriteInSchema.java | 2 +- .../hbase/regionserver/TestColumnSeeking.java | 20 ++-- .../regionserver/TestCompactingMemStore.java | 8 +- .../hbase/regionserver/TestCompaction.java | 24 ++-- .../TestCompactionArchiveConcurrentClose.java | 2 +- .../TestCompactionArchiveIOException.java | 2 +- .../regionserver/TestCompactionPolicy.java | 10 +- .../regionserver/TestCompactionState.java | 2 +- .../regionserver/TestCompoundBloomFilter.java | 2 +- .../TestCorruptedRegionStoreFile.java | 2 +- .../TestDefaultCompactSelection.java | 4 +- .../regionserver/TestDefaultMemStore.java | 18 +-- .../TestEncryptionKeyRotation.java | 4 +- .../TestEncryptionRandomKeying.java | 2 +- .../TestEndToEndSplitTransaction.java | 4 +- .../regionserver/TestFSErrorsExposed.java | 5 +- .../TestGetClosestAtOrBefore.java | 4 +- .../hbase/regionserver/TestHMobStore.java | 17 ++- .../hbase/regionserver/TestHRegion.java | 103 +++++++++--------- .../regionserver/TestHRegionOnCluster.java | 2 +- .../regionserver/TestHRegionReplayEvents.java | 11 +- .../TestHRegionServerBulkLoad.java | 5 +- ...estHRegionServerBulkLoadWithOldClient.java | 3 +- .../regionserver/TestJoinedScanners.java | 2 +- .../hbase/regionserver/TestKeepDeletes.java | 20 ++-- .../hbase/regionserver/TestKeyValueHeap.java | 12 +- .../regionserver/TestMajorCompaction.java | 11 +- .../hbase/regionserver/TestMemStoreLAB.java | 2 +- .../hbase/regionserver/TestMinVersions.java | 2 +- .../TestMiniBatchOperationInProgress.java | 6 +- .../regionserver/TestMultiColumnScanner.java | 16 +-- .../regionserver/TestRegionFavoredNodes.java | 3 +- .../regionserver/TestRegionIncrement.java | 4 +- .../TestRegionMergeTransactionOnCluster.java | 6 +- .../TestRegionReplicaFailover.java | 2 +- .../regionserver/TestRegionReplicas.java | 2 +- .../regionserver/TestRegionSplitPolicy.java | 6 +- .../regionserver/TestReversibleScanners.java | 7 +- .../regionserver/TestSCVFWithMiniCluster.java | 2 +- .../regionserver/TestScanWithBloomError.java | 12 +- .../hbase/regionserver/TestScanner.java | 16 +-- .../TestScannerRetriableFailure.java | 2 +- .../regionserver/TestSeekOptimizations.java | 18 +-- .../hadoop/hbase/regionserver/TestStore.java | 12 +- .../hbase/regionserver/TestStoreFile.java | 10 +- .../TestStoreFileRefresherChore.java | 2 +- .../hbase/regionserver/TestStoreScanner.java | 54 ++++----- .../regionserver/TestStripeStoreEngine.java | 4 +- .../TestStripeStoreFileManager.java | 12 +- .../hadoop/hbase/regionserver/TestTags.java | 6 +- .../hbase/regionserver/TestWALLockup.java | 4 +- .../hbase/regionserver/TestWideScanner.java | 4 +- .../ConstantSizeFileListGenerator.java | 2 +- .../compactions/EverythingPolicy.java | 4 +- 
.../GaussianFileListGenerator.java | 2 +- .../compactions/MockStoreFileGenerator.java | 2 +- .../PerfTestCompactionPolicies.java | 10 +- .../SemiConstantSizeFileListGenerator.java | 2 +- .../SinusoidalFileListGenerator.java | 2 +- .../compactions/SpikyFileListGenerator.java | 2 +- .../TestCompactedHFilesDischarger.java | 4 +- .../compactions/TestCompactor.java | 8 +- .../compactions/TestDateTieredCompactor.java | 2 +- .../TestStripeCompactionPolicy.java | 38 +++---- .../compactions/TestStripeCompactor.java | 2 +- .../TestCompactionScanQueryMatcher.java | 3 +- .../TestExplicitColumnTracker.java | 18 +-- .../TestScanWildcardColumnTracker.java | 14 +-- .../TestUserScanQueryMatcher.java | 16 +-- .../regionserver/wal/AbstractTestFSWAL.java | 8 +- .../wal/AbstractTestLogRollPeriod.java | 2 +- .../wal/AbstractTestWALReplay.java | 21 ++-- .../wal/FaultyProtobufLogReader.java | 2 +- .../hbase/regionserver/wal/TestFSHLog.java | 2 +- .../wal/TestKeyValueCompression.java | 2 +- .../regionserver/wal/TestLogRollAbort.java | 3 +- .../regionserver/wal/TestLogRolling.java | 6 +- .../wal/TestLogRollingNoCluster.java | 3 +- .../wal/TestSequenceIdAccounting.java | 10 +- .../wal/TestWALActionsListener.java | 5 +- .../wal/TestWALCellCodecWithCompression.java | 4 +- .../replication/TestNamespaceReplication.java | 2 +- .../TestPerTableCFReplication.java | 20 ++-- .../replication/TestReplicationBase.java | 3 +- .../replication/TestReplicationEndpoint.java | 6 +- .../TestReplicationSmallTests.java | 7 +- .../TestReplicationStateBasic.java | 12 +- .../TestReplicationSyncUpTool.java | 2 +- .../TestReplicationTrackerZKImpl.java | 2 +- .../TestReplicationWALEntryFilters.java | 36 +++--- .../replication/TestReplicationWithTags.java | 4 +- ...ionReplicaReplicationEndpointNoMaster.java | 2 +- .../regionserver/TestReplicationSink.java | 32 +++--- .../TestReplicationSourceManager.java | 16 ++- .../HbaseObjectWritableFor96Migration.java | 6 +- .../access/TestAccessControlFilter.java | 2 +- .../security/access/TestAccessController.java | 4 +- .../TestCellACLWithMultipleVersions.java | 2 +- .../hbase/security/access/TestCellACLs.java | 2 +- .../access/TestWithDisabledAuthorization.java | 2 +- .../access/TestZKPermissionsWatcher.java | 4 +- .../token/TestTokenAuthentication.java | 7 +- ...ExpAsStringVisibilityLabelServiceImpl.java | 16 +-- .../LabelFilteringScanLabelGenerator.java | 2 +- .../visibility/TestVisibilityLabels.java | 14 +-- ...bilityLabelsOpWithDifferentUsersNoACL.java | 2 +- .../TestVisibilityLabelsReplication.java | 6 +- .../TestVisibilityLabelsWithACL.java | 4 +- ...sibilityLabelsWithCustomVisLabService.java | 2 +- .../TestVisibilityLabelsWithDeletes.java | 6 +- .../TestVisibilityLablesWithGroups.java | 4 +- .../TestWithDisabledAuthorization.java | 4 +- .../hbase/snapshot/SnapshotTestingUtils.java | 10 +- .../hbase/snapshot/TestExportSnapshot.java | 6 +- .../snapshot/TestExportSnapshotHelpers.java | 4 +- .../snapshot/TestFlushSnapshotFromClient.java | 2 +- .../hadoop/hbase/util/BaseTestHBaseFsck.java | 9 +- .../hadoop/hbase/util/ConstantDelayQueue.java | 10 +- .../hbase/util/HFileArchiveTestingUtil.java | 10 +- .../util/LoadTestDataGeneratorWithTags.java | 4 +- .../hadoop/hbase/util/LoadTestTool.java | 4 +- .../hbase/util/MultiThreadedAction.java | 2 +- .../hbase/util/MultiThreadedReader.java | 2 +- .../util/MultiThreadedReaderWithACL.java | 4 +- .../hbase/util/MultiThreadedUpdater.java | 2 +- .../util/MultiThreadedUpdaterWithACL.java | 4 +- .../hbase/util/MultiThreadedWriter.java | 2 +- 
.../hbase/util/MultiThreadedWriterBase.java | 6 +- .../util/ProcessBasedLocalHBaseCluster.java | 15 ++- .../TestBoundedPriorityBlockingQueue.java | 4 +- .../hbase/util/TestCoprocessorScanPolicy.java | 8 +- .../hadoop/hbase/util/TestFSVisitor.java | 12 +- .../hbase/util/TestHBaseFsckEncryption.java | 2 +- .../hadoop/hbase/util/TestHBaseFsckMOB.java | 2 +- .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 6 +- .../hbase/util/TestHBaseFsckReplicas.java | 4 +- .../hadoop/hbase/util/TestHBaseFsckTwoRS.java | 2 +- .../apache/hadoop/hbase/util/TestIdLock.java | 5 +- .../hbase/util/TestIdReadWriteLock.java | 5 +- .../util/TestMiniClusterLoadEncoded.java | 2 +- .../util/TestMiniClusterLoadSequential.java | 2 +- .../apache/hadoop/hbase/util/TestPoolMap.java | 6 +- .../hbase/util/TestRegionSizeCalculator.java | 2 +- .../hbase/util/TestRegionSplitCalculator.java | 44 +++----- .../hadoop/hbase/util/TestRegionSplitter.java | 6 +- .../hbase/util/TestSortedCopyOnWriteSet.java | 5 +- .../hadoop/hbase/util/TestSortedList.java | 11 +- .../util/hbck/OfflineMetaRebuildTestCore.java | 2 +- .../hadoop/hbase/wal/IOTestProvider.java | 2 +- .../TestBoundedRegionGroupingStrategy.java | 2 +- .../hadoop/hbase/wal/TestFSHLogProvider.java | 10 +- .../hadoop/hbase/wal/TestSecureWAL.java | 3 +- .../hadoop/hbase/wal/TestWALFactory.java | 12 +- .../hadoop/hbase/wal/TestWALFiltering.java | 3 +- .../hbase/wal/TestWALReaderOnSecureWAL.java | 3 +- .../apache/hadoop/hbase/wal/TestWALSplit.java | 8 +- .../hbase/wal/WALPerformanceEvaluation.java | 7 +- .../hadoop/hbase/zookeeper/TestZKMulti.java | 32 +++--- .../hbase/zookeeper/TestZooKeeperACL.java | 2 +- .../apache/hadoop/hbase/thrift/CallQueue.java | 2 +- .../hbase/thrift/IncrementCoalescer.java | 4 +- .../thrift/TBoundedThreadPoolServer.java | 4 +- .../hbase/thrift/ThriftServerRunner.java | 31 +++--- .../hadoop/hbase/thrift/ThriftUtilities.java | 12 +- .../thrift2/ThriftHBaseServiceHandler.java | 3 +- .../hadoop/hbase/thrift2/ThriftServer.java | 6 +- .../hadoop/hbase/thrift2/ThriftUtilities.java | 14 +-- .../hadoop/hbase/thrift/TestCallQueue.java | 8 +- .../hbase/thrift/TestThriftHttpServer.java | 2 +- .../hadoop/hbase/thrift/TestThriftServer.java | 32 +++--- .../hbase/thrift/TestThriftServerCmdLine.java | 4 +- .../TestThriftHBaseServiceHandler.java | 102 ++++++++--------- ...stThriftHBaseServiceHandlerWithLabels.java | 34 +++--- 883 files changed, 3176 insertions(+), 3576 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index e571aae567e..545ea61a75f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -254,10 +254,8 @@ public class HColumnDescriptor implements Comparable { */ public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false; - private final static Map DEFAULT_VALUES - = new HashMap(); - private final static Set RESERVED_KEYWORDS - = new HashSet(); + private final static Map DEFAULT_VALUES = new HashMap<>(); + private final static Set RESERVED_KEYWORDS = new HashSet<>(); static { DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER); @@ -293,15 +291,14 @@ public class HColumnDescriptor implements Comparable { private byte [] name; // Column metadata - private final Map values = - new HashMap(); + private final Map values = new HashMap<>(); /** * A map which holds the configuration specific to the column family. 
* The keys of the map have the same names as config keys and override the defaults with * cf-specific settings. Example usage may be for compactions, etc. */ - private final Map configuration = new HashMap(); + private final Map configuration = new HashMap<>(); /* * Cache the max versions rather than calculate it every time. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index da0d941671b..045f866af14 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -1167,7 +1167,7 @@ public class HRegionInfo implements Comparable { throw new IllegalArgumentException("Can't build an object with empty bytes array"); } DataInputBuffer in = new DataInputBuffer(); - List hris = new ArrayList(); + List hris = new ArrayList<>(); try { in.reset(bytes, offset, length); while (in.available() > 0) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 05891df408c..0a4d4caef80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -64,15 +64,14 @@ public class HTableDescriptor implements Comparable { * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY, * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc... */ - private final Map values = - new HashMap(); + private final Map values = new HashMap<>(); /** * A map which holds the configuration specific to the table. * The keys of the map have the same names as config keys and override the defaults with * table-specific settings. Example usage may be for compactions, etc. 
*/ - private final Map configuration = new HashMap(); + private final Map configuration = new HashMap<>(); public static final String SPLIT_POLICY = "SPLIT_POLICY"; @@ -236,10 +235,8 @@ public class HTableDescriptor implements Comparable { public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true; - private final static Map DEFAULT_VALUES - = new HashMap(); - private final static Set RESERVED_KEYWORDS - = new HashSet(); + private final static Map DEFAULT_VALUES = new HashMap<>(); + private final static Set RESERVED_KEYWORDS = new HashSet<>(); static { DEFAULT_VALUES.put(MAX_FILESIZE, @@ -278,7 +275,7 @@ public class HTableDescriptor implements Comparable { * Maps column family name to the respective HColumnDescriptors */ private final Map families = - new TreeMap(Bytes.BYTES_RAWCOMPARATOR); + new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); /** * INTERNAL Private constructor used internally creating table descriptors for @@ -933,8 +930,8 @@ public class HTableDescriptor implements Comparable { StringBuilder s = new StringBuilder(); // step 1: set partitioning and pruning - Set reservedKeys = new TreeSet(); - Set userKeys = new TreeSet(); + Set reservedKeys = new TreeSet<>(); + Set userKeys = new TreeSet<>(); for (Map.Entry entry : values.entrySet()) { if (entry.getKey() == null || entry.getKey().get() == null) continue; String key = Bytes.toString(entry.getKey().get()); @@ -1412,7 +1409,7 @@ public class HTableDescriptor implements Comparable { * @return The list of co-processors classNames */ public List getCoprocessors() { - List result = new ArrayList(this.values.entrySet().size()); + List result = new ArrayList<>(this.values.entrySet().size()); Matcher keyMatcher; for (Map.Entry e : this.values.entrySet()) { keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 61d4c66a63a..ee8d5fdd518 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -170,8 +170,7 @@ public class MetaTableAccessor { @Deprecated public static NavigableMap allTableRegions( Connection connection, final TableName tableName) throws IOException { - final NavigableMap regions = - new TreeMap(); + final NavigableMap regions = new TreeMap<>(); Visitor visitor = new TableVisitorBase(tableName) { @Override public boolean visitInternal(Result result) throws IOException { @@ -311,7 +310,7 @@ public class MetaTableAccessor { HRegionLocation location = getRegionLocation(connection, regionName); return location == null ? 
null - : new Pair(location.getRegionInfo(), location.getServerName()); + : new Pair<>(location.getRegionInfo(), location.getServerName()); } /** @@ -402,7 +401,7 @@ public class MetaTableAccessor { if (mergeA == null && mergeB == null) { return null; } - return new Pair(mergeA, mergeB); + return new Pair<>(mergeA, mergeB); } /** @@ -477,7 +476,7 @@ public class MetaTableAccessor { @Nullable static List getListOfHRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) return null; - List result = new ArrayList(pairs.size()); + List result = new ArrayList<>(pairs.size()); for (Pair pair: pairs) { result.add(pair.getFirst()); } @@ -635,8 +634,7 @@ public class MetaTableAccessor { } for (HRegionLocation loc : current.getRegionLocations()) { if (loc != null) { - this.results.add(new Pair( - loc.getRegionInfo(), loc.getServerName())); + this.results.add(new Pair<>(loc.getRegionInfo(), loc.getServerName())); } } } @@ -658,7 +656,7 @@ public class MetaTableAccessor { public static NavigableMap getServerUserRegions(Connection connection, final ServerName serverName) throws IOException { - final NavigableMap hris = new TreeMap(); + final NavigableMap hris = new TreeMap<>(); // Fill the above hris map with entries from hbase:meta that have the passed // servername. CollectingVisitor v = new CollectingVisitor() { @@ -981,7 +979,7 @@ public class MetaTableAccessor { HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn()); if (regionInfo == null) return null; - List locations = new ArrayList(1); + List locations = new ArrayList<>(1); NavigableMap> familyMap = r.getNoVersionMap(); locations.add(getRegionLocation(r, regionInfo, 0)); @@ -1069,7 +1067,7 @@ public class MetaTableAccessor { HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER); HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER); - return new PairOfSameType(splitA, splitB); + return new PairOfSameType<>(splitA, splitB); } /** @@ -1083,7 +1081,7 @@ public class MetaTableAccessor { HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER); HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER); - return new PairOfSameType(mergeA, mergeB); + return new PairOfSameType<>(mergeA, mergeB); } /** @@ -1183,7 +1181,7 @@ public class MetaTableAccessor { * A {@link Visitor} that collects content out of passed {@link Result}. 
*/ static abstract class CollectingVisitor implements Visitor { - final List results = new ArrayList(); + final List results = new ArrayList<>(); @Override public boolean visit(Result r) throws IOException { if (r == null || r.isEmpty()) return true; @@ -1426,7 +1424,7 @@ public class MetaTableAccessor { */ static void deleteFromMetaTable(final Connection connection, final Delete d) throws IOException { - List dels = new ArrayList(1); + List dels = new ArrayList<>(1); dels.add(d); deleteFromMetaTable(connection, dels); } @@ -1594,7 +1592,7 @@ public class MetaTableAccessor { public static void addRegionsToMeta(Connection connection, List regionInfos, int regionReplication, long ts) throws IOException { - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (HRegionInfo regionInfo : regionInfos) { if (RegionReplicaUtil.isDefaultReplica(regionInfo)) { Put put = makePutFromRegionInfo(regionInfo, ts); @@ -1970,7 +1968,7 @@ public class MetaTableAccessor { */ public static void deleteRegions(Connection connection, List regionsInfo, long ts) throws IOException { - List deletes = new ArrayList(regionsInfo.size()); + List deletes = new ArrayList<>(regionsInfo.size()); for (HRegionInfo hri: regionsInfo) { Delete e = new Delete(hri.getRegionName()); e.addFamily(getCatalogFamily(), ts); @@ -1991,7 +1989,7 @@ public class MetaTableAccessor { final List regionsToRemove, final List regionsToAdd) throws IOException { - List mutation = new ArrayList(); + List mutation = new ArrayList<>(); if (regionsToRemove != null) { for (HRegionInfo hri: regionsToRemove) { mutation.add(makeDeleteFromRegionInfo(hri)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index d16c90f5287..e884e51aa0f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -230,7 +230,7 @@ public class ServerLoad { */ public Map getRegionsLoad() { Map regionLoads = - new TreeMap(Bytes.BYTES_COMPARATOR); + new TreeMap<>(Bytes.BYTES_COMPARATOR); for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) { RegionLoad regionLoad = new RegionLoad(rl); regionLoads.put(regionLoad.getName(), regionLoad); @@ -261,7 +261,7 @@ public class ServerLoad { public String[] getRsCoprocessors() { // Need a set to remove duplicates, but since generated Coprocessor class // is not Comparable, make it a Set instead of Set - TreeSet coprocessSet = new TreeSet(); + TreeSet coprocessSet = new TreeSet<>(); for (Coprocessor coprocessor : obtainServerLoadPB().getCoprocessorsList()) { coprocessSet.add(coprocessor.getName()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index fd2df935ac0..15497ce929e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -123,7 +123,7 @@ public class Append extends Mutation { byte [] family = CellUtil.cloneFamily(cell); List list = this.familyMap.get(family); if (list == null) { - list = new ArrayList(1); + list = new ArrayList<>(1); } // find where the new entry should be placed in the List list.add(cell); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 
269d3167418..a65d327255b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -251,9 +251,8 @@ class AsyncProcess { boolean atLeastOne) throws InterruptedIOException { TableName tableName = task.getTableName(); RowAccess rows = task.getRowAccess(); - Map actionsByServer = - new HashMap(); - List retainedActions = new ArrayList(rows.size()); + Map actionsByServer = new HashMap<>(); + List retainedActions = new ArrayList<>(rows.size()); NonceGenerator ng = this.connection.getNonceGenerator(); long nonceGroup = ng.getNonceGroup(); // Currently, nonce group is per entire client. @@ -287,8 +286,8 @@ class AsyncProcess { } loc = locs.getDefaultRegionLocation(); } catch (IOException ex) { - locationErrors = new ArrayList(1); - locationErrorRows = new ArrayList(1); + locationErrors = new ArrayList<>(1); + locationErrorRows = new ArrayList<>(1); LOG.error("Failed to get region location ", ex); // This action failed before creating ars. Retain it, but do not add to submit list. // We will then add it to ars in an already-failed state. @@ -368,7 +367,7 @@ class AsyncProcess { */ private AsyncRequestFuture submitAll(AsyncProcessTask task) { RowAccess rows = task.getRowAccess(); - List actions = new ArrayList(rows.size()); + List actions = new ArrayList<>(rows.size()); // The position will be used by the processBatch to match the object array returned. int posInList = -1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java index c3caff898f9..41431bbd134 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java @@ -103,9 +103,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } } if (done) return; // Done within primary timeout - Map actionsByServer = - new HashMap(); - List unknownLocActions = new ArrayList(); + Map actionsByServer = new HashMap<>(); + List unknownLocActions = new ArrayList<>(); if (replicaGetIndices == null) { for (int i = 0; i < results.length; ++i) { addReplicaActions(i, actionsByServer, unknownLocActions); @@ -119,7 +118,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { sendMultiAction(actionsByServer, 1, null, unknownLocActions.isEmpty()); } if (!unknownLocActions.isEmpty()) { - actionsByServer = new HashMap(); + actionsByServer = new HashMap<>(); for (Action action : unknownLocActions) { addReplicaActionsAgain(action, actionsByServer); } @@ -374,7 +373,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { hasAnyReplicaGets = true; if (hasAnyNonReplicaReqs) { // Mixed case if (replicaGetIndices == null) { - replicaGetIndices = new ArrayList(actions.size() - 1); + replicaGetIndices = new ArrayList<>(actions.size() - 1); } replicaGetIndices.add(posInList); } @@ -384,7 +383,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (posInList > 0) { // Add all the previous requests to the index lists. We know they are all // replica-gets because this is the first non-multi-replica request in the list. 
- replicaGetIndices = new ArrayList(actions.size() - 1); + replicaGetIndices = new ArrayList<>(actions.size() - 1); for (int i = 0; i < posInList; ++i) { replicaGetIndices.add(i); } @@ -445,8 +444,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { * @param numAttempt - the current numAttempt (first attempt is 1) */ void groupAndSendMultiAction(List currentActions, int numAttempt) { - Map actionsByServer = - new HashMap(); + Map actionsByServer = new HashMap<>(); boolean isReplica = false; List unknownReplicaActions = null; @@ -463,7 +461,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { if (loc == null || loc.getServerName() == null) { if (isReplica) { if (unknownReplicaActions == null) { - unknownReplicaActions = new ArrayList(1); + unknownReplicaActions = new ArrayList<>(1); } unknownReplicaActions.add(action); } else { @@ -485,7 +483,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } if (hasUnknown) { - actionsByServer = new HashMap(); + actionsByServer = new HashMap<>(); for (Action action : unknownReplicaActions) { HRegionLocation loc = getReplicaLocationOrFail(action); if (loc == null) continue; @@ -616,8 +614,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } // group the actions by the amount of delay - Map actions = new HashMap(multiAction - .size()); + Map actions = new HashMap<>(multiAction.size()); // split up the actions for (Map.Entry> e : multiAction.actions.entrySet()) { @@ -630,7 +627,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { } } - List toReturn = new ArrayList(actions.size()); + List toReturn = new ArrayList<>(actions.size()); for (DelayingRunner runner : actions.values()) { asyncProcess.incTaskCounters(runner.getActions().getRegions(), server); String traceText = "AsyncProcess.sendMultiAction"; @@ -736,7 +733,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { asyncProcess.connection.clearCaches(server); } int failed = 0, stopped = 0; - List toReplay = new ArrayList(); + List toReplay = new ArrayList<>(); for (Map.Entry> e : rsActions.actions.entrySet()) { byte[] regionName = e.getKey(); byte[] row = e.getValue().iterator().next().getAction().getRow(); @@ -850,7 +847,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { // - DoNotRetryIOException: we continue to retry for other actions // - RegionMovedException: we update the cache with the new region location - List toReplay = new ArrayList(); + List toReplay = new ArrayList<>(); Throwable throwable = null; int failureCount = 0; boolean canRetry = true; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java index 9bc651da137..08f52fc384b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java @@ -295,7 +295,7 @@ class AsyncRpcRetryingCallerFactory { } public AsyncBatchRpcRetryingCaller build() { - return new AsyncBatchRpcRetryingCaller(retryTimer, conn, tableName, actions, pauseNs, + return new AsyncBatchRpcRetryingCaller<>(retryTimer, conn, tableName, actions, pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java index 
b13c1277224..95b3484aa6b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java @@ -28,9 +28,9 @@ import java.util.List; class BatchErrors { private static final Log LOG = LogFactory.getLog(BatchErrors.class); - final List throwables = new ArrayList(); - final List actions = new ArrayList(); - final List addresses = new ArrayList(); + final List throwables = new ArrayList<>(); + final List actions = new ArrayList<>(); + final List addresses = new ArrayList<>(); public synchronized void add(Throwable ex, Row row, ServerName serverName) { if (row == null){ @@ -51,8 +51,8 @@ class BatchErrors { LOG.error("Exception occurred! Exception details: " + throwables + ";\nActions: " + actions); } - return new RetriesExhaustedWithDetailsException(new ArrayList(throwables), - new ArrayList(actions), new ArrayList(addresses)); + return new RetriesExhaustedWithDetailsException(new ArrayList<>(throwables), + new ArrayList<>(actions), new ArrayList<>(addresses)); } public synchronized void clear() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java index f632bcb9e4c..b1fc2da68f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java @@ -76,9 +76,9 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner { protected void initCache() { // concurrent cache cacheCapacity = calcCacheCapacity(); - cache = new LinkedBlockingQueue(); + cache = new LinkedBlockingQueue<>(); cacheSizeInBytes = new AtomicLong(0); - exceptionsQueue = new ConcurrentLinkedQueue(); + exceptionsQueue = new ConcurrentLinkedQueue<>(); prefetchRunnable = new PrefetchRunnable(); prefetchRunning = new AtomicBoolean(false); closingThreadId = new AtomicLong(NO_THREAD); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 9be4d6dd4d8..53e6dd811d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -75,8 +75,9 @@ public abstract class ClientScanner extends AbstractClientScanner { * contain results if this scanner does not have enough partial results to form the complete * result. */ - protected final LinkedList partialResults = new LinkedList(); protected int partialResultsCellSizes = 0; + protected final LinkedList partialResults = new LinkedList<>(); + /** * The row for which we are accumulating partial Results (i.e. the row of the Results stored * inside partialResults). Changes to partialResultsRow and partialResults are kept in sync via @@ -313,7 +314,7 @@ public abstract class ClientScanner extends AbstractClientScanner { } protected void initSyncCache() { - cache = new LinkedList(); + cache = new LinkedList<>(); } protected Result nextWithSyncCache() throws IOException { @@ -587,7 +588,7 @@ public abstract class ClientScanner extends AbstractClientScanner { protected List getResultsToAddToCache(Result[] resultsFromServer, boolean heartbeatMessage) throws IOException { int resultSize = resultsFromServer != null ? 
resultsFromServer.length : 0; - List resultsToAddToCache = new ArrayList(resultSize); + List resultsToAddToCache = new ArrayList<>(resultSize); // If the caller has indicated in their scan that they are okay with seeing partial results, // then simply add all results to the list. Note allowPartial and setBatch are not same, we can diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index f3c02417c17..240587bffab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.util.Threads; @InterfaceAudience.Private class ClusterStatusListener implements Closeable { private static final Log LOG = LogFactory.getLog(ClusterStatusListener.class); - private final List deadServers = new ArrayList(); + private final List deadServers = new ArrayList<>(); protected final DeadServerHandler deadServerHandler; private final Listener listener; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 0fb9758b99a..adf14968491 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -415,7 +415,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { BlockingQueue workQueue = passedWorkQueue; if (workQueue == null) { workQueue = - new LinkedBlockingQueue(maxThreads * + new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); coreThreads = maxThreads; @@ -443,7 +443,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { this.metaLookupPool = getThreadPool( threads, threads, - "-metaLookup-shared-", new LinkedBlockingQueue()); + "-metaLookup-shared-", new LinkedBlockingQueue<>()); } } } @@ -661,7 +661,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { final boolean useCache, final boolean offlined) throws IOException { List regions = MetaTableAccessor .getTableRegions(this, tableName, !offlined); - final List locations = new ArrayList(); + final List locations = new ArrayList<>(); for (HRegionInfo regionInfo : regions) { RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true); if (list != null) { @@ -967,7 +967,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } // Map keyed by service name + regionserver to service stub implementation - private final ConcurrentMap stubs = new ConcurrentHashMap(); + private final ConcurrentMap stubs = new ConcurrentHashMap<>(); /** * State of the MasterService connection/setup. @@ -1012,8 +1012,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { */ static class ServerErrorTracker { // We need a concurrent map here, as we could have multiple threads updating it in parallel. 
- private final ConcurrentMap errorsByServer = - new ConcurrentHashMap(); + private final ConcurrentMap errorsByServer = new ConcurrentHashMap<>(); private final long canRetryUntil; private final int maxTries;// max number to try private final long startTrackingTime; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 9c6c1a5aa68..0eb1d2bb3a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -172,7 +172,7 @@ public class Delete extends Mutation implements Comparable { byte [] family = CellUtil.cloneFamily(kv); List list = familyMap.get(family); if (list == null) { - list = new ArrayList(1); + list = new ArrayList<>(1); } list.add(kv); familyMap.put(family, list); @@ -209,7 +209,7 @@ public class Delete extends Mutation implements Comparable { } List list = familyMap.get(family); if(list == null) { - list = new ArrayList(1); + list = new ArrayList<>(1); } else if(!list.isEmpty()) { list.clear(); } @@ -229,7 +229,7 @@ public class Delete extends Mutation implements Comparable { public Delete addFamilyVersion(final byte [] family, final long timestamp) { List list = familyMap.get(family); if(list == null) { - list = new ArrayList(1); + list = new ArrayList<>(1); } list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion)); @@ -262,7 +262,7 @@ public class Delete extends Mutation implements Comparable { } List list = familyMap.get(family); if (list == null) { - list = new ArrayList(1); + list = new ArrayList<>(1); } list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn)); @@ -297,7 +297,7 @@ public class Delete extends Mutation implements Comparable { } List list = familyMap.get(family); if(list == null) { - list = new ArrayList(1); + list = new ArrayList<>(1); } KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.Delete); list.add(kv); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 947b54ad1a6..a581ed55510 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -76,8 +76,7 @@ public class Get extends Query private int storeOffset = 0; private boolean checkExistenceOnly = false; private boolean closestRowBefore = false; - private Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Create a Get operation for the specified row. 
@@ -184,7 +183,7 @@ public class Get extends Query public Get addColumn(byte [] family, byte [] qualifier) { NavigableSet set = familyMap.get(family); if(set == null) { - set = new TreeSet(Bytes.BYTES_COMPARATOR); + set = new TreeSet<>(Bytes.BYTES_COMPARATOR); } if (qualifier == null) { qualifier = HConstants.EMPTY_BYTE_ARRAY; @@ -399,8 +398,8 @@ public class Get extends Query */ @Override public Map getFingerprint() { - Map map = new HashMap(); - List families = new ArrayList(this.familyMap.entrySet().size()); + Map map = new HashMap<>(); + List families = new ArrayList<>(this.familyMap.entrySet().size()); map.put("families", families); for (Map.Entry> entry : this.familyMap.entrySet()) { @@ -422,13 +421,13 @@ public class Get extends Query Map map = getFingerprint(); // replace the fingerprint's simple list of families with a // map from column families to lists of qualifiers and kv details - Map> columns = new HashMap>(); + Map> columns = new HashMap<>(); map.put("families", columns); // add scalar information first map.put("row", Bytes.toStringBinary(this.row)); map.put("maxVersions", this.maxVersions); map.put("cacheBlocks", this.cacheBlocks); - List timeRange = new ArrayList(2); + List timeRange = new ArrayList<>(2); timeRange.add(this.tr.getMin()); timeRange.add(this.tr.getMax()); map.put("timeRange", timeRange); @@ -436,7 +435,7 @@ public class Get extends Query // iterate through affected families and add details for (Map.Entry> entry : this.familyMap.entrySet()) { - List familyList = new ArrayList(); + List familyList = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), familyList); if(entry.getValue() == null) { colCount++; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index c68d3bb00b1..5265616c359 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -618,7 +618,7 @@ public class HBaseAdmin implements Admin { */ @Override public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException { - List failed = new LinkedList(); + List failed = new LinkedList<>(); for (HTableDescriptor table : listTables(pattern)) { try { deleteTable(table.getTableName()); @@ -743,7 +743,7 @@ public class HBaseAdmin implements Admin { @Override public HTableDescriptor[] enableTables(Pattern pattern) throws IOException { - List failed = new LinkedList(); + List failed = new LinkedList<>(); for (HTableDescriptor table : listTables(pattern)) { if (isTableDisabled(table.getTableName())) { try { @@ -807,7 +807,7 @@ public class HBaseAdmin implements Admin { @Override public HTableDescriptor[] disableTables(Pattern pattern) throws IOException { - List failed = new LinkedList(); + List failed = new LinkedList<>(); for (HTableDescriptor table : listTables(pattern)) { if (isTableEnabled(table.getTableName())) { try { @@ -1098,8 +1098,7 @@ public class HBaseAdmin implements Admin { LOG.info("Table is disabled: " + tableName.getNameAsString()); return; } - execProcedure("flush-table-proc", tableName.getNameAsString(), - new HashMap()); + execProcedure("flush-table-proc", tableName.getNameAsString(), new HashMap<>()); } @Override @@ -1796,8 +1795,7 @@ public class HBaseAdmin implements Admin { Pair pair = MetaTableAccessor.getRegion(connection, regionName); if (pair == null) { - final AtomicReference> result = - new AtomicReference>(null); + final 
AtomicReference> result = new AtomicReference<>(null); final String encodedName = Bytes.toString(regionName); MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @Override @@ -1820,7 +1818,7 @@ public class HBaseAdmin implements Admin { } } if (!matched) return true; - result.set(new Pair(info, sn)); + result.set(new Pair<>(info, sn)); return false; // found the region, stop } }; @@ -1954,7 +1952,7 @@ public class HBaseAdmin implements Admin { AdminService.BlockingInterface admin = this.connection.getAdmin(sn); HBaseRpcController controller = rpcControllerFactory.newController(); List regionLoads = ProtobufUtil.getRegionLoad(controller, admin, tableName); - Map resultMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map resultMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (RegionLoad regionLoad : regionLoads) { resultMap.put(regionLoad.getName(), regionLoad); } @@ -2279,7 +2277,7 @@ public class HBaseAdmin implements Admin { */ private HTableDescriptor getTableDescriptorByTableName(TableName tableName) throws IOException { - List tableNames = new ArrayList(1); + List tableNames = new ArrayList<>(1); tableNames.add(tableName); HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames); @@ -2295,7 +2293,7 @@ public class HBaseAdmin implements Admin { @Override public HTableDescriptor[] getTableDescriptors(List names) throws IOException { - List tableNames = new ArrayList(names.size()); + List tableNames = new ArrayList<>(names.size()); for(String name : names) { tableNames.add(TableName.valueOf(name)); } @@ -2829,7 +2827,7 @@ public class HBaseAdmin implements Admin { .getCompletedSnapshots(getRpcController(), GetCompletedSnapshotsRequest.newBuilder().build()) .getSnapshotsList(); - List result = new ArrayList(snapshotsList.size()); + List result = new ArrayList<>(snapshotsList.size()); for (HBaseProtos.SnapshotDescription snapshot : snapshotsList) { result.add(ProtobufUtil.createSnapshotDesc(snapshot)); } @@ -2845,7 +2843,7 @@ public class HBaseAdmin implements Admin { @Override public List listSnapshots(Pattern pattern) throws IOException { - List matched = new LinkedList(); + List matched = new LinkedList<>(); List snapshots = listSnapshots(); for (SnapshotDescription snapshot : snapshots) { if (pattern.matcher(snapshot.getName()).matches()) { @@ -2866,7 +2864,7 @@ public class HBaseAdmin implements Admin { Pattern snapshotNamePattern) throws IOException { TableName[] tableNames = listTableNames(tableNamePattern); - List tableSnapshots = new LinkedList(); + List tableSnapshots = new LinkedList<>(); List snapshots = listSnapshots(snapshotNamePattern); List listOfTableNames = Arrays.asList(tableNames); @@ -3985,7 +3983,7 @@ public class HBaseAdmin implements Admin { @Override public void drainRegionServers(List servers) throws IOException { - final List pbServers = new ArrayList(servers.size()); + final List pbServers = new ArrayList<>(servers.size()); for (ServerName server : servers) { // Parse to ServerName to do simple validation. 
ServerName.parseServerName(server.toString()); @@ -4010,7 +4008,7 @@ public class HBaseAdmin implements Admin { @Override public List rpcCall() throws ServiceException { ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build(); - List servers = new ArrayList(); + List servers = new ArrayList<>(); for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req) .getServerNameList()) { servers.add(ProtobufUtil.toServerName(server)); @@ -4022,7 +4020,7 @@ public class HBaseAdmin implements Admin { @Override public void removeDrainFromRegionServers(List servers) throws IOException { - final List pbServers = new ArrayList(servers.size()); + final List pbServers = new ArrayList<>(servers.size()); for (ServerName server : servers) { pbServers.add(ProtobufUtil.toServerName(server)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java index 4d2311dddc9..f2c57461e6c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java @@ -142,7 +142,7 @@ public class HRegionLocator implements RegionLocator { @VisibleForTesting List listRegionLocations() throws IOException { - final List regions = new ArrayList(); + final List regions = new ArrayList<>(); MetaTableAccessor.Visitor visitor = new MetaTableAccessor.TableVisitorBase(tableName) { @Override public boolean visitInternal(Result result) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 0c383fce0f2..3bdbed516d1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -143,7 +143,7 @@ public class HTable implements Table { // we only create as many Runnables as there are region servers. It means // it also scales when new region servers are added. 
ThreadPoolExecutor pool = new ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime, - TimeUnit.SECONDS, new SynchronousQueue(), Threads.newDaemonThreadFactory("htable")); + TimeUnit.SECONDS, new SynchronousQueue<>(), Threads.newDaemonThreadFactory("htable")); pool.allowCoreThreadTimeOut(true); return pool; } @@ -309,8 +309,8 @@ public class HTable implements Table { "Invalid range: " + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey)); } - List keysInRange = new ArrayList(); - List regionsInRange = new ArrayList(); + List keysInRange = new ArrayList<>(); + List regionsInRange = new ArrayList<>(); byte[] currentKey = startKey; do { HRegionLocation regionLocation = getRegionLocator().getRegionLocation(currentKey, reload); @@ -320,8 +320,7 @@ public class HTable implements Table { } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0 || (includeEndKey && Bytes.compareTo(currentKey, endKey) == 0))); - return new Pair, List>(keysInRange, - regionsInRange); + return new Pair<>(keysInRange, regionsInRange); } /** @@ -915,7 +914,7 @@ public class HTable implements Table { if (gets.isEmpty()) return new boolean[]{}; if (gets.size() == 1) return new boolean[]{exists(gets.get(0))}; - ArrayList exists = new ArrayList(gets.size()); + ArrayList exists = new ArrayList<>(gets.size()); for (Get g: gets){ Get ge = new Get(g); ge.setCheckExistenceOnly(true); @@ -1099,8 +1098,7 @@ public class HTable implements Table { final Batch.Callback callback) throws ServiceException, Throwable { // get regions covered by the row range List keys = getStartKeysInRange(startKey, endKey); - Map> futures = - new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> futures = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (final byte[] r : keys) { final RegionCoprocessorRpcChannel channel = new RegionCoprocessorRpcChannel(connection, tableName, r); @@ -1245,10 +1243,8 @@ public class HTable implements Table { return; } - List execs = - new ArrayList(keys.size()); - final Map execsByRow = - new TreeMap(Bytes.BYTES_COMPARATOR); + List execs = new ArrayList<>(keys.size()); + final Map execsByRow = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < keys.size(); i++) { final byte[] rowKey = keys.get(i); final byte[] region = regions.get(i).getRegionInfo().getRegionName(); @@ -1260,9 +1256,9 @@ public class HTable implements Table { // tracking for any possible deserialization errors on success callback // TODO: it would be better to be able to reuse AsyncProcess.BatchErrors here - final List callbackErrorExceptions = new ArrayList(); - final List callbackErrorActions = new ArrayList(); - final List callbackErrorServers = new ArrayList(); + final List callbackErrorExceptions = new ArrayList<>(); + final List callbackErrorActions = new ArrayList<>(); + final List callbackErrorServers = new ArrayList<>(); Object[] results = new Object[execs.size()]; AsyncProcess asyncProcess = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 27393ba1f31..f3a58adea4c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -169,7 +169,7 @@ public class HTableMultiplexer { // Create the failed puts list if necessary if (failedPuts == null) { - failedPuts = new ArrayList(); + failedPuts = new ArrayList<>(); 
} // Add the put to the failed puts list failedPuts.add(put); @@ -288,10 +288,10 @@ public class HTableMultiplexer { this.totalFailedPutCounter = 0; this.maxLatency = 0; this.overallAverageLatency = 0; - this.serverToBufferedCounterMap = new HashMap(); - this.serverToFailedCounterMap = new HashMap(); - this.serverToAverageLatencyMap = new HashMap(); - this.serverToMaxLatencyMap = new HashMap(); + this.serverToBufferedCounterMap = new HashMap<>(); + this.serverToFailedCounterMap = new HashMap<>(); + this.serverToAverageLatencyMap = new HashMap<>(); + this.serverToMaxLatencyMap = new HashMap<>(); this.initialize(serverToFlushWorkerMap); } @@ -412,7 +412,7 @@ public class HTableMultiplexer { } public synchronized SimpleEntry getComponents() { - return new SimpleEntry(sum, count); + return new SimpleEntry<>(sum, count); } public synchronized void reset() { @@ -614,7 +614,7 @@ public class HTableMultiplexer { failedCount--; } else { if (failed == null) { - failed = new ArrayList(); + failed = new ArrayList<>(); } failed.add(processingList.get(i)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index 9538361b2d6..eb1cbc5a749 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -204,10 +204,9 @@ public class Increment extends Mutation implements Comparable { */ public Map> getFamilyMapOfLongs() { NavigableMap> map = super.getFamilyCellMap(); - Map> results = - new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry> entry: map.entrySet()) { - NavigableMap longs = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Cell cell: entry.getValue()) { longs.put(CellUtil.cloneQualifier(cell), Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index 64b1661d15a..ea64900df9a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -193,7 +193,7 @@ public class MetricsConnection implements StatisticTrackable { @VisibleForTesting protected ConcurrentHashMap> serverStats - = new ConcurrentHashMap>(); + = new ConcurrentHashMap<>(); public void updateServerStats(ServerName serverName, byte[] regionName, Object r) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java index dc4ec6271ad..a4aa71d8208 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java @@ -82,7 +82,7 @@ public final class MultiAction { public void add(byte[] regionName, List actionList){ List rsActions = actions.get(regionName); if (rsActions == null) { - rsActions = new ArrayList(actionList.size()); + rsActions = new ArrayList<>(actionList.size()); actions.put(regionName, rsActions); } rsActions.addAll(actionList); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 937e1b55087..7d6744f5797 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -41,7 +41,7 @@ public class MultiResponse extends AbstractResponse { * It's a part of the protobuf definition. */ private Map exceptions = - new TreeMap(Bytes.BYTES_COMPARATOR); + new TreeMap<>(Bytes.BYTES_COMPARATOR); public MultiResponse() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index c4adf347c5e..38a1950e5e6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -107,7 +107,7 @@ class MultiServerCallable extends CancellableRegionServerCallable HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, regionName)); if (this.cellBlock) { // Pre-size. Presume at least a KV per Action. There are likely more. - if (cells == null) cells = new ArrayList(countOfActions); + if (cells == null) cells = new ArrayList<>(countOfActions); // Send data in cellblocks. The call to buildNoDataMultiRequest will skip RowMutations. // They have already been handled above. Guess at count of cells regionActionBuilder = RequestConverter.buildNoDataRegionAction(regionName, actions, cells, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index 53631d95de5..fb55fddcbbf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -92,8 +92,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C protected Durability durability = Durability.USE_DEFAULT; // A Map sorted by column family. - protected NavigableMap> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + protected NavigableMap> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); @Override public CellScanner cellScanner() { @@ -110,7 +109,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C List getCellList(byte[] family) { List list = this.familyMap.get(family); if (list == null) { - list = new ArrayList(); + list = new ArrayList<>(); } return list; } @@ -158,8 +157,8 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C */ @Override public Map getFingerprint() { - Map map = new HashMap(); - List families = new ArrayList(this.familyMap.entrySet().size()); + Map map = new HashMap<>(); + List families = new ArrayList<>(this.familyMap.entrySet().size()); // ideally, we would also include table information, but that information // is not stored in each Operation instance. 
map.put("families", families); @@ -182,15 +181,14 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C Map map = getFingerprint(); // replace the fingerprint's simple list of families with a // map from column families to lists of qualifiers and kv details - Map>> columns = - new HashMap>>(); + Map>> columns = new HashMap<>(); map.put("families", columns); map.put("row", Bytes.toStringBinary(this.row)); int colCount = 0; // iterate through all column families affected for (Map.Entry> entry : this.familyMap.entrySet()) { // map from this family to details for each cell affected within the family - List> qualifierDetails = new ArrayList>(); + List> qualifierDetails = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); colCount += entry.getValue().size(); if (maxCols <= 0) { @@ -220,14 +218,14 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } private static Map cellToStringMap(Cell c) { - Map stringMap = new HashMap(); + Map stringMap = new HashMap<>(); stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); stringMap.put("timestamp", c.getTimestamp()); stringMap.put("vlen", c.getValueLength()); List tags = CellUtil.getTags(c); if (tags != null) { - List tagsString = new ArrayList(tags.size()); + List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(TagUtil.cloneValue(t))); } @@ -317,7 +315,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C * @return the set of clusterIds that have consumed the mutation */ public List getClusterIds() { - List clusterIds = new ArrayList(); + List clusterIds = new ArrayList<>(); byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS); if(bytes != null) { ByteArrayDataInput in = ByteStreams.newDataInput(bytes); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java index 9fdd5775ef7..cc863b9683a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java @@ -44,7 +44,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri } if (attributes == null) { - attributes = new HashMap(); + attributes = new HashMap<>(); } if (value == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java index 448e5b1f53e..a29a662d1dc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java @@ -75,8 +75,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { // Keeps track of failures when we cannot talk to a server. Helps in // fast failing clients if the server is down for a long time. - protected final ConcurrentMap repeatedFailuresMap = - new ConcurrentHashMap(); + protected final ConcurrentMap repeatedFailuresMap = new ConcurrentHashMap<>(); // We populate repeatedFailuresMap every time there is a failure. 
So, to // keep it from growing unbounded, we garbage collect the failure information @@ -90,8 +89,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { // fast fail mode for any reason. private long fastFailClearingTimeMilliSec; - private final ThreadLocal threadRetryingInFastFailMode = - new ThreadLocal(); + private final ThreadLocal threadRetryingInFastFailMode = new ThreadLocal<>(); public PreemptiveFastFailInterceptor(Configuration conf) { this.fastFailThresholdMilliSec = conf.getLong( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index a6ebd038828..701dceb5657 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -161,9 +161,9 @@ public class Put extends Mutation implements HeapSize, Comparable { */ public Put(Put putToCopy) { this(putToCopy.getRow(), putToCopy.ts); - this.familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); + this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(Map.Entry> entry: putToCopy.getFamilyCellMap().entrySet()) { - this.familyMap.put(entry.getKey(), new ArrayList(entry.getValue())); + this.familyMap.put(entry.getKey(), new ArrayList<>(entry.getValue())); } this.durability = putToCopy.durability; for (Map.Entry entry : putToCopy.getAttributesMap().entrySet()) { @@ -464,7 +464,7 @@ public class Put extends Mutation implements HeapSize, Comparable { * returns an empty list if one doesn't exist for the given family. */ public List get(byte[] family, byte[] qualifier) { - List filteredList = new ArrayList(); + List filteredList = new ArrayList<>(); for (Cell cell: getCellList(family)) { if (CellUtil.matchingQualifier(cell, qualifier)) { filteredList.add(cell); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index b4c24fe59d0..4752d70108f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -94,7 +94,7 @@ public class Result implements CellScannable, CellScanner { private transient NavigableMap>> familyMap = null; - private static ThreadLocal localBuffer = new ThreadLocal(); + private static ThreadLocal localBuffer = new ThreadLocal<>(); private static final int PAD_WIDTH = 128; public static final Result EMPTY_RESULT = new Result(true); @@ -247,7 +247,7 @@ public class Result implements CellScannable, CellScanner { * did not exist in the result set */ public List getColumnCells(byte [] family, byte [] qualifier) { - List result = new ArrayList(); + List result = new ArrayList<>(); Cell [] kvs = rawCells(); @@ -662,12 +662,10 @@ public class Result implements CellScannable, CellScanner { if(isEmpty()) { return null; } - NavigableMap> returnMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + NavigableMap> returnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(Map.Entry>> familyEntry : familyMap.entrySet()) { - NavigableMap qualifierMap = - new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap qualifierMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(Map.Entry> qualifierEntry : familyEntry.getValue().entrySet()) { byte [] value = @@ -693,8 +691,7 @@ public class Result implements CellScannable, CellScanner { if(isEmpty()) { return null; } - NavigableMap returnMap = - new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap returnMap = new 
TreeMap<>(Bytes.BYTES_COMPARATOR); NavigableMap> qualifierMap = familyMap.get(family); if(qualifierMap == null) { @@ -797,7 +794,7 @@ public class Result implements CellScannable, CellScanner { */ public static Result createCompleteResult(List partialResults) throws IOException { - List cells = new ArrayList(); + List cells = new ArrayList<>(); boolean stale = false; byte[] prevRow = null; byte[] currentRow = null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java index 2848c9df743..50c3d2ca7a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java @@ -167,7 +167,7 @@ public class ResultBoundedCompletionService { public void submit(RetryingCallable task, int callTimeout, int id) { - QueueingFuture newFuture = new QueueingFuture(task, callTimeout, id); + QueueingFuture newFuture = new QueueingFuture<>(task, callTimeout, id); executor.execute(Trace.wrap(newFuture)); tasks[id] = newFuture; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java index f24e614e5cc..8b092224f49 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java @@ -110,7 +110,7 @@ extends RetriesExhaustedException { String s = getDesc(classifyExs(exceptions)); StringBuilder addrs = new StringBuilder(s); addrs.append("servers with issues: "); - Set uniqAddr = new HashSet(); + Set uniqAddr = new HashSet<>(); uniqAddr.addAll(hostnamePort); for(String addr : uniqAddr) { @@ -143,7 +143,7 @@ extends RetriesExhaustedException { public static Map classifyExs(List ths) { - Map cls = new HashMap(); + Map cls = new HashMap<>(); for (Throwable t : ths) { if (t == null) continue; String name = ""; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index 6e5235b712a..1d46ab480aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -139,7 +139,7 @@ public class ReversedScannerCallable extends ScannerCallable { + Bytes.toStringBinary(startKey) + " > " + Bytes.toStringBinary(endKey)); } - List regionList = new ArrayList(); + List regionList = new ArrayList<>(); byte[] currentKey = startKey; do { RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(reload, id, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java index cc8c23ae73c..41a514ac02e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java @@ -84,7 +84,7 @@ public class RpcRetryingCallerFactory { public RpcRetryingCaller newCaller(int rpcTimeout) { // We store the values in the factory instance. 
This way, constructing new objects // is cheap as it does not require parsing a complex structure. - RpcRetryingCaller caller = new RpcRetryingCallerImpl(pause, pauseForCQTBE, retries, + RpcRetryingCaller caller = new RpcRetryingCallerImpl<>(pause, pauseForCQTBE, retries, interceptor, startLogErrorsCnt, rpcTimeout); return caller; } @@ -95,7 +95,7 @@ public class RpcRetryingCallerFactory { public RpcRetryingCaller newCaller() { // We store the values in the factory instance. This way, constructing new objects // is cheap as it does not require parsing a complex structure. - RpcRetryingCaller caller = new RpcRetryingCallerImpl(pause, pauseForCQTBE, retries, + RpcRetryingCaller caller = new RpcRetryingCallerImpl<>(pause, pauseForCQTBE, retries, interceptor, startLogErrorsCnt, rpcTimeout); return caller; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java index 6450adfc58d..3f65e6e1777 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java @@ -94,8 +94,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { @Override public T callWithRetries(RetryingCallable callable, int callTimeout) throws IOException, RuntimeException { - List exceptions = - new ArrayList(); + List exceptions = new ArrayList<>(); tracker.start(); context.clear(); for (int tries = 0;; tries++) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 316fad1ba3f..00502694b06 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -173,7 +173,7 @@ public class RpcRetryingCallerWithReadReplicas { RegionLocations rl = getRegionLocations(true, (isTargetReplicaSpecified ? 
get.getReplicaId() : RegionReplicaUtil.DEFAULT_REPLICA_ID), cConnection, tableName, get.getRow()); final ResultBoundedCompletionService cs = - new ResultBoundedCompletionService(this.rpcRetryingCallerFactory, pool, rl.size()); + new ResultBoundedCompletionService<>(this.rpcRetryingCallerFactory, pool, rl.size()); int startIndex = 0; int endIndex = rl.size(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 0f9a9af0307..a7d81af6cba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -143,8 +143,7 @@ public class Scan extends Query { private long maxResultSize = -1; private boolean cacheBlocks = true; private boolean reversed = false; - private Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); private Boolean asyncPrefetch = null; /** @@ -339,7 +338,7 @@ public class Scan extends Query { public Scan addColumn(byte [] family, byte [] qualifier) { NavigableSet set = familyMap.get(family); if(set == null) { - set = new TreeSet(Bytes.BYTES_COMPARATOR); + set = new TreeSet<>(Bytes.BYTES_COMPARATOR); } if (qualifier == null) { qualifier = HConstants.EMPTY_BYTE_ARRAY; @@ -889,8 +888,8 @@ public class Scan extends Query { */ @Override public Map getFingerprint() { - Map map = new HashMap(); - List families = new ArrayList(); + Map map = new HashMap<>(); + List families = new ArrayList<>(); if(this.familyMap.isEmpty()) { map.put("families", "ALL"); return map; @@ -916,8 +915,7 @@ public class Scan extends Query { // start with the fingerpring map and build on top of it Map map = getFingerprint(); // map from families to column list replaces fingerprint's list of families - Map> familyColumns = - new HashMap>(); + Map> familyColumns = new HashMap<>(); map.put("families", familyColumns); // add scalar information first map.put("startRow", Bytes.toStringBinary(this.startRow)); @@ -928,7 +926,7 @@ public class Scan extends Query { map.put("maxResultSize", this.maxResultSize); map.put("cacheBlocks", this.cacheBlocks); map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand); - List timeRange = new ArrayList(2); + List timeRange = new ArrayList<>(2); timeRange.add(this.tr.getMin()); timeRange.add(this.tr.getMax()); map.put("timeRange", timeRange); @@ -936,7 +934,7 @@ public class Scan extends Query { // iterate through affected families and list out up to maxCols columns for (Map.Entry> entry : this.familyMap.entrySet()) { - List columns = new ArrayList(); + List columns = new ArrayList<>(); familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); if(entry.getValue() == null) { colCount++; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index 101e8daf552..6b6acf02b3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -69,7 +69,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { private final TableName tableName; private Configuration conf; private int scannerTimeout; - private Set outstandingCallables = new HashSet(); + private Set outstandingCallables = new HashSet<>(); private boolean 
someRPCcancelled = false; //required for testing purposes only public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection, @@ -149,7 +149,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { // allocate a boundedcompletion pool of some multiple of number of replicas. // We want to accomodate some RPCs for redundant replica scans (but are still in progress) ResultBoundedCompletionService> cs = - new ResultBoundedCompletionService>( + new ResultBoundedCompletionService<>( RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool, rl.size() * 5); @@ -359,7 +359,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { return null; } Result[] res = this.caller.callWithoutRetries(this.callable, callTimeout); - return new Pair(res, this.callable); + return new Pair<>(res, this.callable); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java index f66e7fcc203..f78ca41f395 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerStatisticTracker.java @@ -35,8 +35,7 @@ import org.apache.hadoop.hbase.client.backoff.ServerStatistics; @InterfaceAudience.Private public class ServerStatisticTracker implements StatisticTrackable { - private final ConcurrentHashMap stats = - new ConcurrentHashMap(); + private final ConcurrentHashMap stats = new ConcurrentHashMap<>(); @Override public void updateRegionStats(ServerName server, byte[] region, RegionLoadStats currentStats) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java index e33e2bca1fa..a953e8c716c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java @@ -30,8 +30,7 @@ import java.util.TreeMap; @InterfaceAudience.Private public class ServerStatistics { - private Map - stats = new TreeMap(Bytes.BYTES_COMPARATOR); + private Map stats = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Good enough attempt. Last writer wins. 
It doesn't really matter which one gets to update, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java index 4b3e0ce5fd1..7171a94a052 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java @@ -35,7 +35,7 @@ public class ServerSideScanMetrics { /** * Hash to hold the String -> Atomic Long mappings for each metric */ - private final Map counters = new HashMap(); + private final Map counters = new HashMap<>(); /** * Create a new counter with the specified name diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 4e74d87b66a..c7f040e345e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -273,7 +273,7 @@ public class ReplicationAdmin implements Closeable { @Deprecated public Map listPeerConfigs() throws IOException { List peers = this.admin.listReplicationPeers(); - Map result = new TreeMap(); + Map result = new TreeMap<>(); for (ReplicationPeerDescription peer : peers) { result.put(peer.getPeerId(), peer.getPeerConfig()); } @@ -343,7 +343,7 @@ public class ReplicationAdmin implements Closeable { if (cfs == null || appendCfs == null || appendCfs.isEmpty()) { preTableCfs.put(table, null); } else { - Set cfSet = new HashSet(cfs); + Set cfSet = new HashSet<>(cfs); cfSet.addAll(appendCfs); preTableCfs.put(table, Lists.newArrayList(cfSet)); } @@ -400,7 +400,7 @@ public class ReplicationAdmin implements Closeable { if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) { preTableCfs.remove(table); } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) { - Set cfSet = new HashSet(cfs); + Set cfSet = new HashSet<>(cfs); cfSet.removeAll(removeCfs); if (cfSet.isEmpty()) { preTableCfs.remove(table); @@ -484,7 +484,7 @@ public class ReplicationAdmin implements Closeable { tableCFs.getColumnFamilyMap() .forEach( (cf, scope) -> { - HashMap replicationEntry = new HashMap(); + HashMap replicationEntry = new HashMap<>(); replicationEntry.put(TNAME, table); replicationEntry.put(CFNAME, cf); replicationEntry.put(REPLICATIONTYPE, @@ -531,7 +531,7 @@ public class ReplicationAdmin implements Closeable { if (peers == null || peers.size() <= 0) { return null; } - List listOfPeers = new ArrayList(peers.size()); + List listOfPeers = new ArrayList<>(peers.size()); for (Entry peerEntry : peers.entrySet()) { String peerId = peerEntry.getKey(); try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java index 29652196cf7..2d5539cc13c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java @@ -215,7 +215,7 @@ public final class ReplicationSerDeHelper { if (tableCFs == null || tableCFs.length == 0) { return null; } - Map> tableCFsMap = new HashMap>(); + Map> tableCFsMap = new HashMap<>(); for (int 
i = 0, n = tableCFs.length; i < n; i++) { ReplicationProtos.TableCF tableCF = tableCFs[i]; List families = new ArrayList<>(); @@ -283,7 +283,7 @@ public final class ReplicationSerDeHelper { } List namespacesList = peer.getNamespacesList(); if (namespacesList != null && namespacesList.size() != 0) { - Set namespaces = new HashSet(); + Set namespaces = new HashSet<>(); for (ByteString namespace : namespacesList) { namespaces.add(namespace.toStringUtf8()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java index e74797def9d..bbc31ec5774 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java @@ -177,7 +177,7 @@ public abstract class CompareFilter extends FilterBase { " can only be used with EQUAL and NOT_EQUAL"); } } - ArrayList arguments = new ArrayList(2); + ArrayList arguments = new ArrayList<>(2); arguments.add(compareOp); arguments.add(comparator); return arguments; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java index 287a0904175..d82eaec909b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java @@ -54,7 +54,7 @@ public class DependentColumnFilter extends CompareFilter { protected byte[] columnQualifier; protected boolean dropDependentColumn; - protected Set stampSet = new HashSet(); + protected Set stampSet = new HashSet<>(); /** * Build a dependent column filter with value checking diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index c10d18c7901..04eba0ccad2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -453,7 +453,7 @@ final public class FilterList extends FilterBase { throw new DeserializationException(e); } - List rowFilters = new ArrayList(proto.getFiltersCount()); + List rowFilters = new ArrayList<>(proto.getFiltersCount()); try { List filtersList = proto.getFiltersList(); int listSize = filtersList.size(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java index 82d6c57abdf..6b202ad4742 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java @@ -108,7 +108,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { throw new DeserializationException(e); } - TreeSet qualifiers = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR); for (ByteString qualifier : proto.getQualifiersList()) { qualifiers.add(qualifier.toByteArray()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index 5fc12b95bff..65c2a6148e8 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -83,7 +83,7 @@ public class FuzzyRowFilter extends FilterBase { p = fuzzyKeysData.get(i); if (p.getFirst().length != p.getSecond().length) { Pair readable = - new Pair(Bytes.toStringBinary(p.getFirst()), Bytes.toStringBinary(p + new Pair<>(Bytes.toStringBinary(p.getFirst()), Bytes.toStringBinary(p .getSecond())); throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable); } @@ -191,8 +191,7 @@ public class FuzzyRowFilter extends FilterBase { private boolean initialized = false; RowTracker() { - nextRows = - new PriorityQueue>>(fuzzyKeysData.size(), + nextRows = new PriorityQueue<>(fuzzyKeysData.size(), new Comparator>>() { @Override public int compare(Pair> o1, @@ -239,7 +238,7 @@ public class FuzzyRowFilter extends FilterBase { getNextForFuzzyRule(isReversed(), currentCell.getRowArray(), currentCell.getRowOffset(), currentCell.getRowLength(), fuzzyData.getFirst(), fuzzyData.getSecond()); if (nextRowKeyCandidate != null) { - nextRows.add(new Pair>(nextRowKeyCandidate, fuzzyData)); + nextRows.add(new Pair<>(nextRowKeyCandidate, fuzzyData)); } } @@ -278,12 +277,12 @@ public class FuzzyRowFilter extends FilterBase { throw new DeserializationException(e); } int count = proto.getFuzzyKeysDataCount(); - ArrayList> fuzzyKeysData = new ArrayList>(count); + ArrayList> fuzzyKeysData = new ArrayList<>(count); for (int i = 0; i < count; ++i) { BytesBytesPair current = proto.getFuzzyKeysData(i); byte[] keyBytes = current.getFirst().toByteArray(); byte[] keyMeta = current.getSecond().toByteArray(); - fuzzyKeysData.add(new Pair(keyBytes, keyMeta)); + fuzzyKeysData.add(new Pair<>(keyBytes, keyMeta)); } return new FuzzyRowFilter(fuzzyKeysData); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java index 2cc754ac9d5..77fbaf46dd5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java @@ -174,7 +174,7 @@ public class MultiRowRangeFilter extends FilterBase { } int length = proto.getRowRangeListCount(); List rangeProtos = proto.getRowRangeListList(); - List rangeList = new ArrayList(length); + List rangeList = new ArrayList<>(length); for (FilterProtos.RowRange rangeProto : rangeProtos) { RowRange range = new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow() .toByteArray() : null, rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ? 
@@ -252,8 +252,8 @@ public class MultiRowRangeFilter extends FilterBase { if (ranges.isEmpty()) { throw new IllegalArgumentException("No ranges found."); } - List invalidRanges = new ArrayList(); - List newRanges = new ArrayList(ranges.size()); + List invalidRanges = new ArrayList<>(); + List newRanges = new ArrayList<>(ranges.size()); Collections.sort(ranges); if(ranges.get(0).isValid()) { if (ranges.size() == 1) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java index bc268124c77..12d9ac7558a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -164,7 +164,7 @@ public class MultipleColumnPrefixFilter extends FilterBase { } public TreeSet createTreeSet() { - return new TreeSet(new Comparator() { + return new TreeSet<>(new Comparator() { @Override public int compare (Object o1, Object o2) { if (o1 == null || o2 == null) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index f59ddb5d27d..082378591aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -56,7 +56,7 @@ public class ParseFilter { static { // Registers all the filter supported by the Filter Language - filterHashMap = new HashMap(); + filterHashMap = new HashMap<>(); filterHashMap.put("KeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." + "KeyOnlyFilter"); filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." 
+ @@ -95,7 +95,7 @@ public class ParseFilter { "DependentColumnFilter"); // Creates the operatorPrecedenceHashMap - operatorPrecedenceHashMap = new HashMap(); + operatorPrecedenceHashMap = new HashMap<>(); operatorPrecedenceHashMap.put(ParseConstants.SKIP_BUFFER, 1); operatorPrecedenceHashMap.put(ParseConstants.WHILE_BUFFER, 1); operatorPrecedenceHashMap.put(ParseConstants.AND_BUFFER, 2); @@ -122,9 +122,9 @@ public class ParseFilter { public Filter parseFilterString (byte [] filterStringAsByteArray) throws CharacterCodingException { // stack for the operators and parenthesis - Stack operatorStack = new Stack(); + Stack operatorStack = new Stack<>(); // stack for the filter objects - Stack filterStack = new Stack(); + Stack filterStack = new Stack<>(); Filter filter = null; for (int i=0; i filterArguments = new ArrayList(); + ArrayList filterArguments = new ArrayList<>(); for (int i = argumentListStartIndex + 1; i listOfFilters = new ArrayList(); + ArrayList listOfFilters = new ArrayList<>(); while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.OR_BUFFER)) { Filter filter = filterStack.pop(); listOfFilters.add(0, filter); @@ -410,7 +410,7 @@ public class ParseFilter { } else if (argumentOnTopOfStack.equals(ParseConstants.AND_BUFFER)) { // The top of the stack is an AND try { - ArrayList listOfFilters = new ArrayList(); + ArrayList listOfFilters = new ArrayList<>(); while (!operatorStack.empty() && operatorStack.peek().equals(ParseConstants.AND_BUFFER)) { Filter filter = filterStack.pop(); listOfFilters.add(0, filter); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java index 921b7b40c3a..8c58f910f20 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -77,7 +77,7 @@ public class TimestampsFilter extends FilterBase { Preconditions.checkArgument(timestamp >= 0, "must be positive %s", timestamp); } this.canHint = canHint; - this.timestamps = new TreeSet(timestamps); + this.timestamps = new TreeSet<>(timestamps); init(); } @@ -85,7 +85,7 @@ public class TimestampsFilter extends FilterBase { * @return the list of timestamps */ public List getTimestamps() { - List list = new ArrayList(timestamps.size()); + List list = new ArrayList<>(timestamps.size()); list.addAll(timestamps); return list; } @@ -157,7 +157,7 @@ public class TimestampsFilter extends FilterBase { } public static Filter createFilterFromArguments(ArrayList filterArguments) { - ArrayList timestamps = new ArrayList(filterArguments.size()); + ArrayList timestamps = new ArrayList<>(filterArguments.size()); for (int i = 0; i id2Call = new HashMap(); + private final Map id2Call = new HashMap<>(); public NettyRpcDuplexHandler(NettyRpcConnection conn, CellBlockBuilder cellBlockBuilder, Codec codec, CompressionCodec compressor) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index d935a08a533..4f68447b993 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -118,8 +118,7 @@ public final class ProtobufUtil { /** * Primitive type to class mapping. 
*/ - private final static Map> - PRIMITIVES = new HashMap>(); + private final static Map> PRIMITIVES = new HashMap<>(); /** * Many results are simple: no cell, exists true or false. To save on object creations, @@ -1384,7 +1383,7 @@ public final class ProtobufUtil { return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT; } - List cells = new ArrayList(values.size()); + List cells = new ArrayList<>(values.size()); for (CellProtos.Cell c : values) { cells.add(toCell(c)); } @@ -1418,7 +1417,7 @@ public final class ProtobufUtil { List cells = null; if (proto.hasAssociatedCellCount()) { int count = proto.getAssociatedCellCount(); - cells = new ArrayList(count + values.size()); + cells = new ArrayList<>(count + values.size()); for (int i = 0; i < count; i++) { if (!scanner.advance()) throw new IOException("Failed get " + i + " of " + count); cells.add(scanner.current()); @@ -1426,7 +1425,7 @@ public final class ProtobufUtil { } if (!values.isEmpty()){ - if (cells == null) cells = new ArrayList(values.size()); + if (cells == null) cells = new ArrayList<>(values.size()); for (CellProtos.Cell c: values) { cells.add(toCell(c)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java index c3db6ee7027..309dd9c89e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Strings; @InterfaceAudience.Public @InterfaceStability.Evolving public class QuotaFilter { - private Set types = new HashSet(); + private Set types = new HashSet<>(); private boolean hasFilters = false; private String namespaceRegex; private String tableRegex; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index 37e4a922dc8..fecd2d15dd5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -47,7 +47,7 @@ import org.apache.hadoop.util.StringUtils; public class QuotaRetriever implements Closeable, Iterable { private static final Log LOG = LogFactory.getLog(QuotaRetriever.class); - private final Queue cache = new LinkedList(); + private final Queue cache = new LinkedList<>(); private ResultScanner scanner; /** * Connection to use. 
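Every hunk up to this point applies the same mechanical rewrite: the constructor side of a generic instantiation drops its repeated type arguments and relies on the diamond operator available since Java 7, while the declaration side keeps its explicit types. A minimal before/after sketch of the pattern, using a hypothetical metrics holder rather than code from this patch:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

class DiamondSketch {
  // Before: the type arguments are spelled out on both sides of the assignment.
  private final Map<String, AtomicLong> countersBefore = new HashMap<String, AtomicLong>();

  // After: the compiler infers <String, AtomicLong> from the declared field type.
  private final Map<String, AtomicLong> countersAfter = new HashMap<>();
}

Both fields erase to identical bytecode; the diamond is purely a source-level shorthand, which is why these hunks carry no behavioral risk.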
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java index a7c49b3dc9d..1a8b934e261 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java @@ -84,7 +84,7 @@ public class QuotaSettingsFactory { private static List fromQuotas(final String userName, final TableName tableName, final String namespace, final Quotas quotas) { - List settings = new ArrayList(); + List settings = new ArrayList<>(); if (quotas.hasThrottle()) { settings.addAll(fromThrottle(userName, tableName, namespace, quotas.getThrottle())); } @@ -96,7 +96,7 @@ public class QuotaSettingsFactory { private static List fromThrottle(final String userName, final TableName tableName, final String namespace, final QuotaProtos.Throttle throttle) { - List settings = new ArrayList(); + List settings = new ArrayList<>(); if (throttle.hasReqNum()) { settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace, ThrottleType.REQUEST_NUMBER, throttle.getReqNum())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index 790f0216c04..f7cc2dd955b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -46,8 +46,8 @@ public class ReplicationPeerConfig { private long bandwidth = 0; public ReplicationPeerConfig() { - this.peerData = new TreeMap(Bytes.BYTES_COMPARATOR); - this.configuration = new HashMap(0); + this.peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR); + this.configuration = new HashMap<>(0); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java index 8b13f751a49..3973be938e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java @@ -49,7 +49,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase private ReplicationPeerConfig peerConfig; private final String id; private volatile PeerState peerState; - private volatile Map> tableCFs = new HashMap>(); + private volatile Map> tableCFs = new HashMap<>(); private final Configuration conf; private PeerStateTracker peerStateTracker; private PeerConfigTracker peerConfigTracker; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index cf5be83d1fd..02fe2f13b1a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -87,7 +87,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re final ReplicationQueuesClient queuesClient, Abortable abortable) { super(zk, conf, abortable); this.abortable = abortable; - this.peerClusters = new ConcurrentHashMap(); + this.peerClusters = new ConcurrentHashMap<>(); this.queuesClient = 
queuesClient; } @@ -128,7 +128,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re ZKUtil.createWithParents(this.zookeeper, this.peersZNode); - List listOfOps = new ArrayList(2); + List listOfOps = new ArrayList<>(2); ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id), ReplicationSerDeHelper.toByteArray(peerConfig)); // b/w PeerWatcher and ReplicationZookeeper#add method to create the @@ -246,7 +246,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re @Override public Map getAllPeerConfigs() { - Map peers = new TreeMap(); + Map peers = new TreeMap<>(); List ids = null; try { ids = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); @@ -331,10 +331,10 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re CompoundConfiguration compound = new CompoundConfiguration(); compound.add(otherConf); compound.addStringMap(peerConfig.getConfiguration()); - return new Pair(peerConfig, compound); + return new Pair<>(peerConfig, compound); } - return new Pair(peerConfig, otherConf); + return new Pair<>(peerConfig, otherConf); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index 64eedfbd4f9..1403f6d2de5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -42,7 +42,7 @@ public class ReplicationQueueInfo { private final String peerClusterZnode; private boolean queueRecovered; // List of all the dead region servers that had this queue (if recovered) - private List deadRegionServers = new ArrayList(); + private List deadRegionServers = new ArrayList<>(); /** * The passed znode will be either the id of the peer cluster or diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java index 484084e0211..4733706c3b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java @@ -248,7 +248,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R this.abortable.abort("Failed to get a list of queues for region server: " + this.myQueuesZnode, e); } - return listOfQueues == null ? new ArrayList() : listOfQueues; + return listOfQueues == null ? 
new ArrayList<>() : listOfQueues; } /** @@ -329,7 +329,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R } int size = pairs.size(); - List listOfOps = new ArrayList(size); + List listOfOps = new ArrayList<>(size); for (int i = 0; i < size; i++) { listOfOps.add(ZKUtilOp.createAndFailSilent( @@ -356,7 +356,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R } int size = files.size(); - List listOfOps = new ArrayList(size); + List listOfOps = new ArrayList<>(size); for (int i = 0; i < size; i++) { listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i)))); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java index 61bb041ba08..4606e22fc75 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java @@ -142,7 +142,7 @@ abstract class ReplicationTableBase { */ private Executor setUpExecutor() { ThreadPoolExecutor tempExecutor = new ThreadPoolExecutor(NUM_INITIALIZE_WORKERS, - NUM_INITIALIZE_WORKERS, 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue()); + NUM_INITIALIZE_WORKERS, 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); tfb.setNameFormat("ReplicationTableExecutor-%d"); tfb.setDaemon(true); @@ -223,7 +223,7 @@ abstract class ReplicationTableBase { */ protected List getListOfReplicators() { // scan all of the queues and return a list of all unique OWNER values - Set peerServers = new HashSet(); + Set peerServers = new HashSet<>(); ResultScanner allQueuesInCluster = null; try (Table replicationTable = getOrBlockOnReplicationTable()){ Scan scan = new Scan(); @@ -240,11 +240,11 @@ abstract class ReplicationTableBase { allQueuesInCluster.close(); } } - return new ArrayList(peerServers); + return new ArrayList<>(peerServers); } protected List getAllQueues(String serverName) { - List allQueues = new ArrayList(); + List allQueues = new ArrayList<>(); ResultScanner queueScanner = null; try { queueScanner = getQueuesBelongingToServer(serverName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index 9d182dc9cd5..9865d83c95a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -45,10 +45,9 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements // All about stopping private final Stoppable stopper; // listeners to be notified - private final List listeners = - new CopyOnWriteArrayList(); + private final List listeners = new CopyOnWriteArrayList<>(); // List of all the other region servers in this cluster - private final ArrayList otherRegionServers = new ArrayList(); + private final ArrayList otherRegionServers = new ArrayList<>(); private final ReplicationPeers replicationPeers; public ReplicationTrackerZKImpl(ZooKeeperWatcher zookeeper, @@ -80,7 +79,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements List list = null; synchronized (otherRegionServers) { - list = new ArrayList(otherRegionServers); + list = 
new ArrayList<>(otherRegionServers); } return list; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java index dcbed7ac275..35075473bdb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java @@ -72,7 +72,7 @@ public class TableBasedReplicationQueuesClientImpl extends ReplicationTableBase @Override public Set getAllWALs() { - Set allWals = new HashSet(); + Set allWals = new HashSet<>(); ResultScanner allQueues = null; try (Table replicationTable = getOrBlockOnReplicationTable()) { allQueues = replicationTable.getScanner(new Scan()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java index 1023e0d6455..bf55e8cf615 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java @@ -201,7 +201,7 @@ public class TableBasedReplicationQueuesImpl extends ReplicationTableBase public List getLogsInQueue(String queueId) { String errMsg = "Failed getting logs in queue queueId=" + queueId; byte[] rowKey = queueIdToRowKey(queueId); - List logs = new ArrayList(); + List logs = new ArrayList<>(); try { Get getQueue = new Get(rowKey); Result queue = getResultIfOwner(getQueue); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index e48f81def78..0e8a68d370a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto @InterfaceAudience.Private public class SecurityInfo { /** Maps RPC service names to authentication information */ - private static ConcurrentMap infos = new ConcurrentHashMap(); + private static ConcurrentMap infos = new ConcurrentHashMap<>(); // populate info for known services static { infos.put(AdminProtos.AdminService.getDescriptor().getName(), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index eeac9c7dbbe..1c4a8685474 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -266,7 +266,7 @@ public class AccessControlClient { HBaseRpcController controller = ((ClusterConnection) connection).getRpcControllerFactory().newController(); */ - List permList = new ArrayList(); + List permList = new ArrayList<>(); try (Table table = connection.getTable(ACL_TABLE_NAME)) { try (Admin admin = connection.getAdmin()) { CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java index 1d26366dc6d..1873ea3a6c3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java @@ -367,7 +367,7 @@ public class AccessControlUtil { */ public static List toPermissionActions( List protoActions) { - List actions = new ArrayList(protoActions.size()); + List actions = new ArrayList<>(protoActions.size()); for (AccessControlProtos.Permission.Action a : protoActions) { actions.add(toPermissionAction(a)); } @@ -644,7 +644,7 @@ public class AccessControlUtil { AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = protocol.getUserPermissions(controller, request); - List perms = new ArrayList(response.getUserPermissionCount()); + List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm: response.getUserPermissionList()) { perms.add(toUserPermission(perm)); } @@ -672,7 +672,7 @@ public class AccessControlUtil { AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = protocol.getUserPermissions(controller, request); - List perms = new ArrayList(response.getUserPermissionCount()); + List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm: response.getUserPermissionList()) { perms.add(toUserPermission(perm)); } @@ -700,7 +700,7 @@ public class AccessControlUtil { AccessControlProtos.GetUserPermissionsRequest request = builder.build(); AccessControlProtos.GetUserPermissionsResponse response = protocol.getUserPermissions(controller, request); - List perms = new ArrayList(response.getUserPermissionCount()); + List perms = new ArrayList<>(response.getUserPermissionCount()); for (AccessControlProtos.UserPermission perm: response.getUserPermissionList()) { perms.add(toUserPermission(perm)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java index 4b3ed54cd09..5fdeee98600 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java @@ -34,7 +34,7 @@ public class Authorizations { private List labels; public Authorizations(String... 
labels) { - this.labels = new ArrayList(labels.length); + this.labels = new ArrayList<>(labels.length); Collections.addAll(this.labels, labels); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index cd153f1d6aa..d87bf145dc5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -130,7 +130,7 @@ public class VisibilityClient { new Batch.Call() { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { @@ -215,7 +215,7 @@ public class VisibilityClient { new Batch.Call() { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); @@ -268,7 +268,7 @@ public class VisibilityClient { new Batch.Call() { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); @@ -340,7 +340,7 @@ public class VisibilityClient { new Batch.Call() { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 24302be9eff..38ae04ae6de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -191,8 +191,7 @@ public final class ProtobufUtil { /** * Primitive type to class mapping. */ - private final static Map> - PRIMITIVES = new HashMap>(); + private final static Map> PRIMITIVES = new HashMap<>(); /** * Many results are simple: no cell, exists true or false. To save on object creations, @@ -1491,7 +1490,7 @@ public final class ProtobufUtil { return proto.getStale() ? 
EMPTY_RESULT_STALE : EMPTY_RESULT; } - List cells = new ArrayList(values.size()); + List cells = new ArrayList<>(values.size()); for (CellProtos.Cell c : values) { cells.add(toCell(c)); } @@ -1525,7 +1524,7 @@ public final class ProtobufUtil { List cells = null; if (proto.hasAssociatedCellCount()) { int count = proto.getAssociatedCellCount(); - cells = new ArrayList(count + values.size()); + cells = new ArrayList<>(count + values.size()); for (int i = 0; i < count; i++) { if (!scanner.advance()) throw new IOException("Failed get " + i + " of " + count); cells.add(scanner.current()); @@ -1533,7 +1532,7 @@ public final class ProtobufUtil { } if (!values.isEmpty()){ - if (cells == null) cells = new ArrayList(values.size()); + if (cells == null) cells = new ArrayList<>(values.size()); for (CellProtos.Cell c: values) { cells.add(toCell(c)); } @@ -1903,7 +1902,7 @@ public final class ProtobufUtil { */ static List getRegionInfos(final GetOnlineRegionResponse proto) { if (proto == null) return null; - List regionInfos = new ArrayList(proto.getRegionInfoList().size()); + List regionInfos = new ArrayList<>(proto.getRegionInfoList().size()); for (RegionInfo regionInfo: proto.getRegionInfoList()) { regionInfos.add(HRegionInfo.convert(regionInfo)); } @@ -2719,7 +2718,7 @@ public final class ProtobufUtil { public static List toReplicationLoadSourceList( List clsList) { - ArrayList rlsList = new ArrayList(clsList.size()); + ArrayList rlsList = new ArrayList<>(clsList.size()); for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) { rlsList.add(toReplicationLoadSource(cls)); } @@ -2976,26 +2975,26 @@ public final class ProtobufUtil { public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) { Map servers = null; - servers = new HashMap(proto.getLiveServersList().size()); + servers = new HashMap<>(proto.getLiveServersList().size()); for (LiveServerInfo lsi : proto.getLiveServersList()) { servers.put(ProtobufUtil.toServerName( lsi.getServer()), new ServerLoad(lsi.getServerLoad())); } Collection deadServers = null; - deadServers = new ArrayList(proto.getDeadServersList().size()); + deadServers = new ArrayList<>(proto.getDeadServersList().size()); for (HBaseProtos.ServerName sn : proto.getDeadServersList()) { deadServers.add(ProtobufUtil.toServerName(sn)); } Collection backupMasters = null; - backupMasters = new ArrayList(proto.getBackupMastersList().size()); + backupMasters = new ArrayList<>(proto.getBackupMastersList().size()); for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) { backupMasters.add(ProtobufUtil.toServerName(sn)); } Set rit = null; - rit = new HashSet(proto.getRegionsInTransitionList().size()); + rit = new HashSet<>(proto.getRegionsInTransitionList().size()); for (RegionInTransition region : proto.getRegionsInTransitionList()) { RegionState value = RegionState.convert(region.getRegionState()); rit.add(value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index d3ef7b873c5..998b3c00800 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -572,8 +572,7 @@ public final class RequestConverter { .setService(userToken.getService().toString()).build(); } - List protoFamilyPaths = - new ArrayList(familyPaths.size()); + List protoFamilyPaths = new 
ArrayList<>(familyPaths.size()); if (!familyPaths.isEmpty()) { ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder = ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index cbcad8025d6..ecadbbc38dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -233,7 +233,7 @@ public final class ResponseConverter { public static List getRegionOpeningStateList( final OpenRegionResponse proto) { if (proto == null) return null; - List regionOpeningStates = new ArrayList(proto.getOpeningStateCount()); + List regionOpeningStates = new ArrayList<>(proto.getOpeningStateCount()); for (int i = 0; i < proto.getOpeningStateCount(); i++) { regionOpeningStates.add(RegionOpeningState.valueOf( proto.getOpeningState(i).name())); @@ -394,7 +394,7 @@ public final class ResponseConverter { boolean isPartial = response.getPartialFlagPerResultCount() > i ? response.getPartialFlagPerResult(i) : false; - List cells = new ArrayList(noOfCells); + List cells = new ArrayList<>(noOfCells); for (int j = 0; j < noOfCells; j++) { try { if (cellScanner.advance() == false) { @@ -426,7 +426,7 @@ public final class ResponseConverter { } public static Map getScanMetrics(ScanResponse response) { - Map metricMap = new HashMap(); + Map metricMap = new HashMap<>(); if (response == null || !response.hasScanMetrics() || response.getScanMetrics() == null) { return metricMap; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index b683fcc9e4e..2131db359f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -57,7 +57,7 @@ public class PoolMap implements Map { private int poolMaxSize; - private Map> pools = new ConcurrentHashMap>(); + private Map> pools = new ConcurrentHashMap<>(); public PoolMap(PoolType poolType) { this.poolType = poolType; @@ -107,7 +107,7 @@ public class PoolMap implements Map { @Override public Collection values() { - Collection values = new ArrayList(); + Collection values = new ArrayList<>(); for (Pool pool : pools.values()) { Collection poolValues = pool.values(); if (poolValues != null) { @@ -118,7 +118,7 @@ public class PoolMap implements Map { } public Collection values(K key) { - Collection values = new ArrayList(); + Collection values = new ArrayList<>(); Pool pool = pools.get(key); if (pool != null) { Collection poolValues = pool.values(); @@ -185,7 +185,7 @@ public class PoolMap implements Map { @Override public Set> entrySet() { - Set> entries = new HashSet>(); + Set> entries = new HashSet<>(); for (Map.Entry> poolEntry : pools.entrySet()) { final K poolKey = poolEntry.getKey(); final Pool pool = poolEntry.getValue(); @@ -271,11 +271,11 @@ public class PoolMap implements Map { protected Pool createPool() { switch (poolType) { case Reusable: - return new ReusablePool(poolMaxSize); + return new ReusablePool<>(poolMaxSize); case RoundRobin: - return new RoundRobinPool(poolMaxSize); + return new RoundRobinPool<>(poolMaxSize); case ThreadLocal: - return new ThreadLocalPool(); + return new ThreadLocalPool<>(); } return null; } @@ -389,7 +389,7 @@ 
public class PoolMap implements Map { * the type of the resource */ static class ThreadLocalPool extends ThreadLocal implements Pool { - private static final Map, AtomicInteger> poolSizes = new HashMap, AtomicInteger>(); + private static final Map, AtomicInteger> poolSizes = new HashMap<>(); public ThreadLocalPool() { } @@ -441,7 +441,7 @@ public class PoolMap implements Map { @Override public Collection values() { - List values = new ArrayList(); + List values = new ArrayList<>(); values.add(get()); return values; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 940d523f50d..abe307942b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -68,7 +68,7 @@ public class Writables { * @throws IOException e */ public static byte [] getBytes(final Writable... ws) throws IOException { - List bytes = new ArrayList(ws.length); + List bytes = new ArrayList<>(ws.length); int size = 0; for (Writable w: ws) { byte [] b = getBytes(w); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index 612799727d1..0090b6f2cd8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -106,7 +106,7 @@ public class HQuorumPeer { conf.get("hbase.zookeeper.dns.interface","default"), conf.get("hbase.zookeeper.dns.nameserver","default"))); - List ips = new ArrayList(); + List ips = new ArrayList<>(); // Add what could be the best (configured) match ips.add(myAddress.contains(".") ? 
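The ReplicationQueuesZKImpl and PoolMap hunks above also use the diamond where no matching declaration sits on the same line, e.g. return new ReusablePool<>(poolMaxSize) and the listOfQueues == null ? new ArrayList<>() : listOfQueues ternary; there the type arguments are inferred from the target type of the surrounding context. A small sketch of that inference under Java 8 target typing, with hypothetical method names:

import java.util.ArrayList;
import java.util.List;

class TargetTypeInference {
  // The diamond infers <String> from the declared return type List<String>.
  static List<String> emptyNames() {
    return new ArrayList<>();
  }

  // Inference also flows through a conditional expression, matching the
  // "== null ? new ArrayList<>() : ..." shape used in the patch.
  static List<String> namesOrDefault(List<String> maybe) {
    return maybe == null ? new ArrayList<>() : maybe;
  }
}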
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java index 7458ac760e3..e63bfc56ac9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java @@ -74,7 +74,7 @@ class InstancePending { */ void prepare(T instance) { assert instance != null; - instanceHolder = new InstanceHolder(instance); + instanceHolder = new InstanceHolder<>(instance); pendingLatch.countDown(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index e8431a2e75b..afab54a14e2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -106,8 +106,8 @@ public class MetaTableLocator { public List> getMetaRegionsAndLocations(ZooKeeperWatcher zkw, int replicaId) { ServerName serverName = getMetaRegionLocation(zkw, replicaId); - List> list = new ArrayList>(1); - list.add(new Pair(RegionReplicaUtil.getRegionInfoForReplica( + List> list = new ArrayList<>(1); + list.add(new Pair<>(RegionReplicaUtil.getRegionInfoForReplica( HRegionInfo.FIRST_META_REGIONINFO, replicaId), serverName)); return list; } @@ -135,7 +135,7 @@ public class MetaTableLocator { private List getListOfHRegionInfos( final List> pairs) { if (pairs == null || pairs.isEmpty()) return null; - List result = new ArrayList(pairs.size()); + List result = new ArrayList<>(pairs.size()); for (Pair pair: pairs) { result.add(pair.getFirst()); } @@ -550,7 +550,7 @@ public class MetaTableLocator { throws InterruptedException { int numReplicasConfigured = 1; - List servers = new ArrayList(); + List servers = new ArrayList<>(); // Make the blocking call first so that we do the wait to know // the znodes are all in place or timeout. ServerName server = blockUntilAvailable(zkw, timeout); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java index 11d0e5dfc44..da7d1767e72 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java @@ -33,7 +33,7 @@ import org.apache.zookeeper.Watcher; * and then call the method {@code PendingWatcher.prepare}. 
*/ class PendingWatcher implements Watcher { - private final InstancePending pending = new InstancePending(); + private final InstancePending pending = new InstancePending<>(); @Override public void process(WatchedEvent event) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 14532cfd839..43a5ad9187e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -637,7 +637,7 @@ public class RecoverableZooKeeper { throws UnsupportedOperationException { if(ops == null) return null; - List preparedOps = new LinkedList(); + List preparedOps = new LinkedList<>(); for (Op op : ops) { if (op.getType() == ZooDefs.OpCode.create) { CreateRequest create = (CreateRequest)op.toRequestRecord(); @@ -777,7 +777,7 @@ public class RecoverableZooKeeper { */ private static List filterByPrefix(List nodes, String... prefixes) { - List lockChildren = new ArrayList(); + List lockChildren = new ArrayList<>(); for (String child : nodes){ for (String prefix : prefixes){ if (child.startsWith(prefix)){ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 3e00e0459eb..c678a7cf7a0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -235,8 +235,7 @@ public class ZKUtil { private static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = "zookeeper-client-keytab-kerberos"; - private static final Map BASIC_JAAS_OPTIONS = - new HashMap(); + private static final Map BASIC_JAAS_OPTIONS = new HashMap<>(); static { String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG"); if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) { @@ -244,8 +243,7 @@ public class ZKUtil { } } - private static final Map KEYTAB_KERBEROS_OPTIONS = - new HashMap(); + private static final Map KEYTAB_KERBEROS_OPTIONS = new HashMap<>(); static { KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true"); KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true"); @@ -746,7 +744,7 @@ public class ZKUtil { List nodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); if (nodes != null) { - List newNodes = new ArrayList(); + List newNodes = new ArrayList<>(); for (String node : nodes) { String nodePath = ZKUtil.joinZNode(baseNode, node); byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath); @@ -905,11 +903,11 @@ public class ZKUtil { return Ids.OPEN_ACL_UNSAFE; } if (isSecureZooKeeper) { - ArrayList acls = new ArrayList(); + ArrayList acls = new ArrayList<>(); // add permission to hbase supper user String[] superUsers = zkw.getConfiguration().getStrings(Superusers.SUPERUSER_CONF_KEY); if (superUsers != null) { - List groups = new ArrayList(); + List groups = new ArrayList<>(); for (String user : superUsers) { if (AuthUtil.isGroupPrincipal(user)) { // TODO: Set node ACL for groups when ZK supports this feature @@ -1327,7 +1325,7 @@ public class ZKUtil { LOG.warn("Given path is not valid!"); return; } - List ops = new ArrayList(); + List ops = new ArrayList<>(); for (String eachRoot : pathRoots) { List children = listChildrenBFSNoWatch(zkw, eachRoot); // Delete the leaves first and eventually get rid of the root @@ -1377,7 +1375,7 @@ public class ZKUtil { 
LOG.warn("Given path is not valid!"); return; } - List ops = new ArrayList(); + List ops = new ArrayList<>(); for (String eachRoot : pathRoots) { // ZooKeeper Watches are one time triggers; When children of parent nodes are deleted // recursively, must set another watch, get notified of delete node @@ -1415,8 +1413,8 @@ public class ZKUtil { */ private static List listChildrenBFSNoWatch(ZooKeeperWatcher zkw, final String znode) throws KeeperException { - Deque queue = new LinkedList(); - List tree = new ArrayList(); + Deque queue = new LinkedList<>(); + List tree = new ArrayList<>(); queue.add(znode); while (true) { String node = queue.pollFirst(); @@ -1451,8 +1449,8 @@ public class ZKUtil { */ private static List listChildrenBFSAndWatchThem(ZooKeeperWatcher zkw, final String znode) throws KeeperException { - Deque queue = new LinkedList(); - List tree = new ArrayList(); + Deque queue = new LinkedList<>(); + List tree = new ArrayList<>(); queue.add(znode); while (true) { String node = queue.pollFirst(); @@ -1648,7 +1646,7 @@ public class ZKUtil { } if (ops == null) return; - List zkOps = new LinkedList(); + List zkOps = new LinkedList<>(); for (ZKUtilOp op : ops) { zkOps.add(toZooKeeperOp(zkw, op)); } @@ -1816,7 +1814,7 @@ public class ZKUtil { private static void appendRSZnodes(ZooKeeperWatcher zkw, String znode, StringBuilder sb) throws KeeperException { - List stack = new LinkedList(); + List stack = new LinkedList<>(); stack.add(znode); do { String znodeToProcess = stack.remove(stack.size() - 1); @@ -1927,7 +1925,7 @@ public class ZKUtil { socket.getInputStream())); out.println("stat"); out.flush(); - ArrayList res = new ArrayList(); + ArrayList res = new ArrayList<>(); while (true) { String line = in.readLine(); if (line != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index c8462fb3979..f18b8ba6b74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -83,8 +83,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { public final ZNodePaths znodePaths; // listeners to be notified - private final List listeners = - new CopyOnWriteArrayList(); + private final List listeners = new CopyOnWriteArrayList<>(); // Used by ZKUtil:waitForZKConnectionIfAuthenticating to wait for SASL // negotiation to complete @@ -374,7 +373,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { */ public List getMetaReplicaNodes() throws KeeperException { List childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, znodePaths.baseZNode); - List metaReplicaNodes = new ArrayList(2); + List metaReplicaNodes = new ArrayList<>(2); if (childrenOfBaseNode != null) { String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server"); for (String child : childrenOfBaseNode) { @@ -416,7 +415,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * Get a copy of current registered listeners */ public List getListeners() { - return new ArrayList(listeners); + return new ArrayList<>(listeners); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java index d0b63179042..9acbb43eb42 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java @@ -342,7 +342,7 @@ public class TestInterfaceAudienceAnnotations { @Test public void testProtosInReturnTypes() throws ClassNotFoundException, IOException, LinkageError { Set> classes = findPublicClasses(); - List, Method>> protosReturnType = new ArrayList, Method>>(); + List, Method>> protosReturnType = new ArrayList<>(); for (Class clazz : classes) { findProtoInReturnType(clazz, protosReturnType); } @@ -374,8 +374,7 @@ public class TestInterfaceAudienceAnnotations { @Test public void testProtosInParamTypes() throws ClassNotFoundException, IOException, LinkageError { Set> classes = findPublicClasses(); - List, Method, Class>> protosParamType = - new ArrayList, Method, Class>>(); + List, Method, Class>> protosParamType = new ArrayList<>(); for (Class clazz : classes) { findProtoInParamType(clazz, protosParamType); } @@ -395,7 +394,7 @@ public class TestInterfaceAudienceAnnotations { @Test public void testProtosInConstructors() throws ClassNotFoundException, IOException, LinkageError { Set> classes = findPublicClasses(); - List> classList = new ArrayList>(); + List> classList = new ArrayList<>(); for (Class clazz : classes) { Constructor[] constructors = clazz.getConstructors(); for (Constructor cons : constructors) { @@ -424,7 +423,7 @@ public class TestInterfaceAudienceAnnotations { private void findProtoInReturnType(Class clazz, List, Method>> protosReturnType) { - Pair, Method> returnTypePair = new Pair, Method>(); + Pair, Method> returnTypePair = new Pair<>(); Method[] methods = clazz.getMethods(); returnTypePair.setFirst(clazz); for (Method method : methods) { @@ -443,7 +442,7 @@ public class TestInterfaceAudienceAnnotations { private void findProtoInParamType(Class clazz, List, Method, Class>> protosParamType) { - Triple, Method, Class> paramType = new Triple, Method, Class>(); + Triple, Method, Class> paramType = new Triple<>(); Method[] methods = clazz.getMethods(); paramType.setFirst(clazz); for (Method method : methods) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 75199a61dc3..f2f04673b0f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -147,7 +147,7 @@ public class TestAsyncProcess { static class MyAsyncProcess extends AsyncProcess { final AtomicInteger nbMultiResponse = new AtomicInteger(); final AtomicInteger nbActions = new AtomicInteger(); - public List allReqs = new ArrayList(); + public List allReqs = new ArrayList<>(); public AtomicInteger callsCt = new AtomicInteger(); private long previousTimeout = -1; @@ -162,7 +162,7 @@ public class TestAsyncProcess { return DUMMY_TABLE; } }; - AsyncRequestFutureImpl r = new MyAsyncRequestFutureImpl( + AsyncRequestFutureImpl r = new MyAsyncRequestFutureImpl<>( wrap, actions, nonceGroup, this); allReqs.add(r); return r; @@ -326,9 +326,9 @@ public class TestAsyncProcess { } } class MyAsyncProcessWithReplicas extends MyAsyncProcess { - private Set failures = new TreeSet(new Bytes.ByteArrayComparator()); + private Set failures = new TreeSet<>(new Bytes.ByteArrayComparator()); private long primarySleepMs = 0, replicaSleepMs = 0; - private Map customPrimarySleepMs = new HashMap(); + private 
Map customPrimarySleepMs = new HashMap<>(); private final AtomicLong replicaCalls = new AtomicLong(0); public void addFailures(HRegionInfo... hris) { @@ -683,7 +683,7 @@ public class TestAsyncProcess { ClusterConnection hc = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(hc, CONF); - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); puts.add(createPut(1, true)); ap.submit(null, DUMMY_TABLE, puts, false, null, false); @@ -702,7 +702,7 @@ public class TestAsyncProcess { }; MyAsyncProcess ap = new MyAsyncProcess(hc, CONF); - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); puts.add(createPut(1, true)); final AsyncRequestFuture ars = ap.submit(null, DUMMY_TABLE, puts, false, cb, false); @@ -719,7 +719,7 @@ public class TestAsyncProcess { SimpleRequestController.class.getName()); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); SimpleRequestController controller = (SimpleRequestController) ap.requestController; - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); puts.add(createPut(1, true)); for (int i = 0; i != controller.maxConcurrentTasksPerRegion; ++i) { @@ -748,7 +748,7 @@ public class TestAsyncProcess { SimpleRequestController controller = (SimpleRequestController) ap.requestController; controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer)); - List puts = new ArrayList(4); + List puts = new ArrayList<>(4); puts.add(createPut(1, true)); puts.add(createPut(3, true)); // <== this one won't be taken, the rs is busy puts.add(createPut(1, true)); // <== this one will make it, the region is already in @@ -770,7 +770,7 @@ public class TestAsyncProcess { public void testFail() throws Exception { MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false); - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); Put p = createPut(1, false); puts.add(p); @@ -818,7 +818,7 @@ public class TestAsyncProcess { } }; - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); Put p = createPut(1, true); puts.add(p); @@ -844,7 +844,7 @@ public class TestAsyncProcess { public void testFailAndSuccess() throws Exception { MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false); - List puts = new ArrayList(3); + List puts = new ArrayList<>(3); puts.add(createPut(1, false)); puts.add(createPut(1, true)); puts.add(createPut(1, true)); @@ -871,7 +871,7 @@ public class TestAsyncProcess { public void testFlush() throws Exception { MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false); - List puts = new ArrayList(3); + List puts = new ArrayList<>(3); puts.add(createPut(1, false)); puts.add(createPut(1, true)); puts.add(createPut(1, true)); @@ -956,7 +956,7 @@ public class TestAsyncProcess { } }; - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); puts.add(createPut(1, true)); t.start(); @@ -981,7 +981,7 @@ public class TestAsyncProcess { t2.start(); long start = System.currentTimeMillis(); - ap.submit(null, DUMMY_TABLE, new ArrayList(), false, null, false); + ap.submit(null, DUMMY_TABLE, new ArrayList<>(), false, null, false); long end = System.currentTimeMillis(); //Adds 100 to secure us against approximate timing. 
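Several of the test hunks hand a diamond expression straight to a constructor or method, for example the new LinkedBlockingQueue<>(200) passed to a thread pool above; in argument position the element type is taken from the declared parameter type. A sketch of that case, not taken from the patch:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class ArgumentInference {
  static ThreadPoolExecutor newExecutor() {
    // The queue's element type is inferred as Runnable from the
    // BlockingQueue<Runnable> parameter of the ThreadPoolExecutor constructor.
    return new ThreadPoolExecutor(1, 4, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(200));
  }
}

When the parameter itself is generic, the inferred type can differ from the one previously spelled out (often a supertype), which is normally harmless but worth confirming with a compile pass rather than a purely textual review.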
@@ -1008,7 +1008,7 @@ public class TestAsyncProcess { setMockLocation(hc, DUMMY_BYTES_1, hrls1); setMockLocation(hc, DUMMY_BYTES_2, hrls2); setMockLocation(hc, DUMMY_BYTES_3, hrls3); - List locations = new ArrayList(); + List locations = new ArrayList<>(); for (HRegionLocation loc : hrls1.getRegionLocations()) { locations.add(loc); } @@ -1172,7 +1172,7 @@ public class TestAsyncProcess { HTable ht = new HTable(conn, mutator); ht.multiAp = new MyAsyncProcess(conn, CONF, false); - List puts = new ArrayList(7); + List puts = new ArrayList<>(7); puts.add(createPut(1, true)); puts.add(createPut(1, true)); puts.add(createPut(1, true)); @@ -1309,8 +1309,8 @@ public class TestAsyncProcess { @Test public void testThreadCreation() throws Exception { final int NB_REGS = 100; - List hrls = new ArrayList(NB_REGS); - List gets = new ArrayList(NB_REGS); + List hrls = new ArrayList<>(NB_REGS); + List gets = new ArrayList<>(NB_REGS); for (int i = 0; i < NB_REGS; i++) { HRegionInfo hri = new HRegionInfo( DUMMY_TABLE, Bytes.toBytes(i * 10L), Bytes.toBytes(i * 10L + 9L), false, i); @@ -1518,7 +1518,7 @@ public class TestAsyncProcess { } private static List makeTimelineGets(byte[]... rows) { - List result = new ArrayList(rows.length); + List result = new ArrayList<>(rows.length); for (byte[] row : rows) { Get get = new Get(row); get.setConsistency(Consistency.TIMELINE); @@ -1609,10 +1609,10 @@ public class TestAsyncProcess { ClusterConnection hc = createHConnection(); MyThreadPoolExecutor myPool = new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue(200)); + new LinkedBlockingQueue<>(200)); AsyncProcess ap = new AsyncProcessForThrowableCheck(hc, CONF); - List puts = new ArrayList(1); + List puts = new ArrayList<>(1); puts.add(createPut(1, true)); AsyncProcessTask task = AsyncProcessTask.newBuilder() .setPool(myPool) diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index a4be9a2b7fe..d20c7c87b02 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -368,8 +368,7 @@ public class TestClientNoCluster extends Configured implements Tool { throws IOException { super(conf, pool, user); int serverCount = conf.getInt("hbase.test.servers", 10); - this.serversByClient = - new HashMap(serverCount); + this.serversByClient = new HashMap<>(serverCount); this.meta = makeMeta(Bytes.toBytes( conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE))), conf.getInt("hbase.test.regions", 100), @@ -694,14 +693,13 @@ public class TestClientNoCluster extends Configured implements Tool { final int regionCount, final long namespaceSpan, final int serverCount) { // I need a comparator for meta rows so we sort properly. 
SortedMap> meta = - new ConcurrentSkipListMap>(new MetaRowsComparator()); + new ConcurrentSkipListMap<>(new MetaRowsComparator()); HRegionInfo [] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan); ServerName [] serverNames = makeServerNames(serverCount); int per = regionCount / serverCount; int count = 0; for (HRegionInfo hri: hris) { - Pair p = - new Pair(hri, serverNames[count++ / per]); + Pair p = new Pair<>(hri, serverNames[count++ / per]); meta.put(hri.getRegionName(), p); } return meta; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java index e93319a1f73..f22e5d434ad 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java @@ -48,7 +48,7 @@ public class TestKeyOnlyFilter { @Parameters public static Collection parameters() { - List paramList = new ArrayList(2); + List paramList = new ArrayList<>(2); { paramList.add(new Object[] { false }); paramList.add(new Object[] { true }); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java index 0659f303647..0ec78ad5717 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java @@ -40,7 +40,7 @@ public class TestHBaseRpcControllerImpl { @Test public void testListOfCellScannerables() throws IOException { final int count = 10; - List cells = new ArrayList(count); + List cells = new ArrayList<>(count); for (int i = 0; i < count; i++) { cells.add(createCell(i)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java index d2d0a53032a..771cf52ed28 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java @@ -80,7 +80,7 @@ public class BuilderStyleTest { } Set sigMethods = methodsBySignature.get(sig); if (sigMethods == null) { - sigMethods = new HashSet(); + sigMethods = new HashSet<>(); methodsBySignature.put(sig, sigMethods); } sigMethods.add(method); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java index 667fed80a33..e67c9fdbdf5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java @@ -29,8 +29,8 @@ import org.junit.experimental.categories.Category; public class TestInstancePending { @Test(timeout = 1000) public void test() throws Exception { - final InstancePending pending = new InstancePending(); - final AtomicReference getResultRef = new AtomicReference(); + final InstancePending pending = new InstancePending<>(); + final AtomicReference getResultRef = new AtomicReference<>(); new Thread() { @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 28c1d8894b4..5930928259d 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -1597,7 +1597,7 @@ public final class CellUtil { * @return Tags in the given Cell as a List */ public static List getTags(Cell cell) { - List tags = new ArrayList(); + List tags = new ArrayList<>(); Iterator tagsItr = tagsIterator(cell); while (tagsItr.hasNext()) { tags.add(tagsItr.next()); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 99dc1639c86..d4ec48ea5e7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -134,8 +134,8 @@ public class ChoreService implements ChoreServicer { } scheduler.setRemoveOnCancelPolicy(true); - scheduledChores = new HashMap>(); - choresMissingStartTime = new HashMap(); + scheduledChores = new HashMap<>(); + choresMissingStartTime = new HashMap<>(); } /** @@ -348,7 +348,7 @@ public class ChoreService implements ChoreServicer { } private void cancelAllChores(final boolean mayInterruptIfRunning) { - ArrayList choresToCancel = new ArrayList(scheduledChores.keySet().size()); + ArrayList choresToCancel = new ArrayList<>(scheduledChores.keySet().size()); // Build list of chores to cancel so we can iterate through a set that won't change // as chores are cancelled. If we tried to cancel each chore while iterating through // keySet the results would be undefined because the keySet would be changing @@ -365,7 +365,7 @@ public class ChoreService implements ChoreServicer { * Prints a summary of important details about the chore. Used for debugging purposes */ private void printChoreDetails(final String header, ScheduledChore chore) { - LinkedHashMap output = new LinkedHashMap(); + LinkedHashMap output = new LinkedHashMap<>(); output.put(header, ""); output.put("Chore name: ", chore.getName()); output.put("Chore period: ", Integer.toString(chore.getPeriod())); @@ -380,7 +380,7 @@ public class ChoreService implements ChoreServicer { * Prints a summary of important details about the service. 
Used for debugging purposes */ private void printChoreServiceDetails(final String header) { - LinkedHashMap output = new LinkedHashMap(); + LinkedHashMap output = new LinkedHashMap<>(); output.put(header, ""); output.put("ChoreService corePoolSize: ", Integer.toString(getCorePoolSize())); output.put("ChoreService scheduledChores: ", Integer.toString(getNumberOfScheduledChores())); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java index 0eda1e5e4fc..a7fcba6a219 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java @@ -72,8 +72,7 @@ public class CompoundConfiguration extends Configuration { int size(); } - private final List configs - = new ArrayList(); + private final List configs = new ArrayList<>(); static class ImmutableConfWrapper implements ImmutableConfigMap { private final Configuration c; @@ -167,7 +166,7 @@ public class CompoundConfiguration extends Configuration { @Override public Iterator> iterator() { - Map ret = new HashMap(); + Map ret = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { String key = Bytes.toString(entry.getKey().get()); String val = entry.getValue() == null ? null : Bytes.toString(entry.getValue().get()); @@ -366,7 +365,7 @@ public class CompoundConfiguration extends Configuration { @Override public Iterator> iterator() { - Map ret = new HashMap(); + Map ret = new HashMap<>(); // add in reverse order so that oldest get overridden. if (!configs.isEmpty()) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 0434820189d..96fc30b6f2f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -81,7 +81,7 @@ import com.google.common.annotations.VisibleForTesting; */ @InterfaceAudience.Private public class KeyValue implements ExtendedCell { - private static final ArrayList EMPTY_ARRAY_LIST = new ArrayList(); + private static final ArrayList EMPTY_ARRAY_LIST = new ArrayList<>(); private static final Log LOG = LogFactory.getLog(KeyValue.class); @@ -1174,7 +1174,7 @@ public class KeyValue implements ExtendedCell { * @return the Map<String,?> containing data from this key */ public Map toStringMap() { - Map stringMap = new HashMap(); + Map stringMap = new HashMap<>(); stringMap.put("row", Bytes.toStringBinary(getRowArray(), getRowOffset(), getRowLength())); stringMap.put("family", Bytes.toStringBinary(getFamilyArray(), getFamilyOffset(), getFamilyLength())); @@ -1184,7 +1184,7 @@ public class KeyValue implements ExtendedCell { stringMap.put("vlen", getValueLength()); List tags = getTags(); if (tags != null) { - List tagsString = new ArrayList(tags.size()); + List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { tagsString.add(t.toString()); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index ca990cf5c4a..807749af737 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -496,7 +496,7 @@ public class KeyValueUtil { return KeyValueUtil.ensureKeyValue(arg0); } }); - return new ArrayList(lazyList); + return new 
ArrayList<>(lazyList); } /** * Write out a KeyValue in the manner in which we used to when KeyValue was a diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java index e1ceace36f4..23876ab74e2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java @@ -57,14 +57,14 @@ public class NamespaceDescriptor { public final static Set RESERVED_NAMESPACES; static { - Set set = new HashSet(); + Set set = new HashSet<>(); set.add(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); set.add(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); RESERVED_NAMESPACES = Collections.unmodifiableSet(set); } public final static Set RESERVED_NAMESPACES_BYTES; static { - Set set = new TreeSet(Bytes.BYTES_RAWCOMPARATOR); + Set set = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR); for(String name: RESERVED_NAMESPACES) { set.add(Bytes.toBytes(name)); } @@ -165,7 +165,7 @@ public class NamespaceDescriptor { @InterfaceStability.Evolving public static class Builder { private String bName; - private Map bConfiguration = new TreeMap(); + private Map bConfiguration = new TreeMap<>(); private Builder(NamespaceDescriptor ns) { this.bName = ns.name; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java index 499ffd99304..0c0a7ffa868 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -99,7 +99,7 @@ public class ServerName implements Comparable, Serializable { * @see #getVersionedBytes() */ private byte [] bytes; - public static final List EMPTY_SERVER_LIST = new ArrayList(0); + public static final List EMPTY_SERVER_LIST = new ArrayList<>(0); protected ServerName(final String hostname, final int port, final long startcode) { this(Address.fromParts(hostname, port), startcode); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index 63066b3e82b..9b9755bd689 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator; public final class TableName implements Comparable { /** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */ - private static final Set tableCache = new CopyOnWriteArraySet(); + private static final Set tableCache = new CopyOnWriteArraySet<>(); /** Namespace delimiter */ //this should always be only 1 byte long diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java index f9668ddd591..936d8c27e41 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java @@ -72,7 +72,7 @@ public final class TagUtil { * @return List of tags */ public static List asList(byte[] b, int offset, int length) { - List tags = new ArrayList(); + List tags = new ArrayList<>(); int pos = offset; while (pos < offset + length) { int tagLen = Bytes.readAsInt(b, pos, TAG_LENGTH_SIZE); @@ -91,7 +91,7 @@ public final class TagUtil { * @return List of tags */ public static List asList(ByteBuffer b, int 
offset, int length) { - List tags = new ArrayList(); + List tags = new ArrayList<>(); int pos = offset; while (pos < offset + length) { int tagLen = ByteBufferUtils.readAsInt(b, pos, TAG_LENGTH_SIZE); @@ -239,7 +239,7 @@ public final class TagUtil { } List tags = tagsOrNull; if (tags == null) { - tags = new ArrayList(); + tags = new ArrayList<>(); } while (itr.hasNext()) { tags.add(itr.next()); @@ -276,7 +276,7 @@ public final class TagUtil { // in the array so set its size to '1' (I saw this being done in earlier version of // tag-handling). if (tags == null) { - tags = new ArrayList(1); + tags = new ArrayList<>(1); } tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl))); return tags; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java index 939d12d1477..079a27709ec 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java @@ -51,7 +51,7 @@ import com.google.common.annotations.VisibleForTesting; public class BoundedByteBufferPool { private static final Log LOG = LogFactory.getLog(BoundedByteBufferPool.class); - private final Queue buffers = new ConcurrentLinkedQueue(); + private final Queue buffers = new ConcurrentLinkedQueue<>(); @VisibleForTesting int getQueueSize() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java index c334a5a7d9c..9c6796e7b84 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java @@ -44,8 +44,8 @@ public class ByteBufferListOutputStream extends ByteBufferOutputStream { // it is not available will make a new one our own and keep writing to that. We keep track of all // the BBs that we got from pool, separately so that on closeAndPutbackBuffers, we can make sure // to return back all of them to pool - protected List allBufs = new ArrayList(); - protected List bufsFromPool = new ArrayList(); + protected List allBufs = new ArrayList<>(); + protected List bufsFromPool = new ArrayList<>(); private boolean lastBufFlipped = false;// Indicate whether the curBuf/lastBuf is flipped already diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java index 115671dfeb5..07ba3db7cd5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java @@ -54,7 +54,7 @@ public class ByteBufferPool { public static final int DEFAULT_BUFFER_SIZE = 64 * 1024;// 64 KB. Making it same as the chunk size // what we will write/read to/from the // socket channel. 
- private final Queue buffers = new ConcurrentLinkedQueue(); + private final Queue buffers = new ConcurrentLinkedQueue<>(); private final int bufferSize; private final int maxPoolSize; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java index ad89ca054c7..b6c2e978626 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java @@ -533,15 +533,14 @@ public final class Encryption { } } - static final Map,KeyProvider> keyProviderCache = - new ConcurrentHashMap,KeyProvider>(); + static final Map,KeyProvider> keyProviderCache = new ConcurrentHashMap<>(); public static KeyProvider getKeyProvider(Configuration conf) { String providerClassName = conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName()); String providerParameters = conf.get(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, ""); try { - Pair providerCacheKey = new Pair(providerClassName, + Pair providerCacheKey = new Pair<>(providerClassName, providerParameters); KeyProvider provider = keyProviderCache.get(providerCacheKey); if (provider != null) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index cef51d80bf9..22d7e3ed9a2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -738,7 +738,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder { protected KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue(); // A temp pair object which will be reused by ByteBuff#asSubByteBuffer calls. This avoids too // many object creations. 
- protected final ObjectIntPair tmpPair = new ObjectIntPair(); + protected final ObjectIntPair tmpPair = new ObjectIntPair<>(); protected STATE current, previous; public BufferedEncodedSeeker(CellComparator comparator, diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java index d81bb4a973a..0f8ea017d0e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java @@ -110,7 +110,7 @@ public class RowIndexCodecV1 extends AbstractDataBlockEncoder { RowIndexSeekerV1 seeker = new RowIndexSeekerV1(CellComparator.COMPARATOR, decodingCtx); seeker.setCurrentBuffer(new SingleByteBuff(sourceAsBuffer)); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); kvs.add(seeker.getCell()); while (seeker.next()) { kvs.add(seeker.getCell()); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java index 4e14acbe99a..6ac564532bd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java @@ -40,7 +40,7 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker { // A temp pair object which will be reused by ByteBuff#asSubByteBuffer calls. This avoids too // many object creations. - protected final ObjectIntPair tmpPair = new ObjectIntPair(); + protected final ObjectIntPair tmpPair = new ObjectIntPair<>(); private ByteBuff currentBuffer; private SeekerState current = new SeekerState(); // always valid diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/LRUDictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/LRUDictionary.java index 99780ba6935..24569617261 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/LRUDictionary.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/LRUDictionary.java @@ -86,7 +86,7 @@ public class LRUDictionary implements Dictionary { private Node head; private Node tail; - private HashMap nodeToIndex = new HashMap(); + private HashMap nodeToIndex = new HashMap<>(); private Node[] indexToNode; private int initSize = 0; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java index 0e1c3ae8e3b..93252842821 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java @@ -135,7 +135,7 @@ public class StreamUtils { int newOffset = offset; byte tmp = input[newOffset++]; if (tmp >= 0) { - return new Pair((int) tmp, newOffset - offset); + return new Pair<>((int) tmp, newOffset - offset); } int result = tmp & 0x7f; tmp = input[newOffset++]; @@ -160,7 +160,7 @@ public class StreamUtils { for (int i = 0; i < 5; i++) { tmp = input[newOffset++]; if (tmp >= 0) { - return new Pair(result, newOffset - offset); + return new Pair<>(result, newOffset - offset); } } throw new IOException("Malformed varint"); @@ -168,7 +168,7 @@ public class StreamUtils { } } } - return new Pair(result, newOffset - offset); + return new Pair<>(result, newOffset - offset); } public static Pair 
readRawVarint32(ByteBuffer input, int offset) @@ -176,7 +176,7 @@ public class StreamUtils { int newOffset = offset; byte tmp = input.get(newOffset++); if (tmp >= 0) { - return new Pair((int) tmp, newOffset - offset); + return new Pair<>((int) tmp, newOffset - offset); } int result = tmp & 0x7f; tmp = input.get(newOffset++); @@ -201,7 +201,7 @@ public class StreamUtils { for (int i = 0; i < 5; i++) { tmp = input.get(newOffset++); if (tmp >= 0) { - return new Pair(result, newOffset - offset); + return new Pair<>(result, newOffset - offset); } } throw new IOException("Malformed varint"); @@ -209,7 +209,7 @@ public class StreamUtils { } } } - return new Pair(result, newOffset - offset); + return new Pair<>(result, newOffset - offset); } public static short toShort(byte hi, byte lo) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java index 0e45410430d..9f6b7b53a2f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/SingleByteBuff.java @@ -202,7 +202,7 @@ public class SingleByteBuff extends ByteBuff { } else { // TODO we can do some optimization here? Call to asSubByteBuffer might // create a copy. - ObjectIntPair pair = new ObjectIntPair(); + ObjectIntPair pair = new ObjectIntPair<>(); src.asSubByteBuffer(srcOffset, length, pair); if (pair.getFirst() != null) { ByteBufferUtils.copyFromBufferToBuffer(pair.getFirst(), this.buf, pair.getSecond(), offset, diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java index c480dad23c9..be2a0d39bb5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java @@ -354,8 +354,7 @@ public abstract class User { } static class TestingGroups extends Groups { - private final Map> userToGroupsMapping = - new HashMap>(); + private final Map> userToGroupsMapping = new HashMap<>(); private Groups underlyingImplementation; TestingGroups(Groups underlyingImplementation) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java index 43b1c89194b..955abfc96ac 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java @@ -90,7 +90,7 @@ public class UserProvider extends BaseConfigurable { private String[] getGroupStrings(String ugi) { try { - Set result = new LinkedHashSet(groups.getGroups(ugi)); + Set result = new LinkedHashSet<>(groups.getGroups(ugi)); return result.toArray(new String[result.size()]); } catch (Exception e) { return new String[0]; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java index f632ae05cbc..1b6a67d491f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java @@ -63,7 +63,7 @@ public class SpanReceiverHost { } SpanReceiverHost(Configuration conf) { - receivers = new HashSet(); + receivers = new HashSet<>(); this.conf = conf; } diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/StructBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/StructBuilder.java index c9c3b64b727..d73a17d3070 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/StructBuilder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/StructBuilder.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public class StructBuilder { - protected final List> fields = new ArrayList>(); + protected final List> fields = new ArrayList<>(); /** * Create an empty {@code StructBuilder}. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ArrayUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ArrayUtils.java index 4e3374e25b9..51b87f763f8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ArrayUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ArrayUtils.java @@ -102,7 +102,7 @@ public class ArrayUtils { public static ArrayList toList(long[] array){ int length = length(array); - ArrayList list = new ArrayList(length); + ArrayList list = new ArrayList<>(length); for(int i=0; i < length; ++i){ list.add(array[i]); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AvlUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AvlUtil.java index 260a8b2d6cd..58c50a8266c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AvlUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AvlUtil.java @@ -261,7 +261,7 @@ public final class AvlUtil { final AvlNodeVisitor visitor) { if (root == null) return; - final AvlTreeIterator iterator = new AvlTreeIterator(root); + final AvlTreeIterator iterator = new AvlTreeIterator<>(root); boolean visitNext = true; while (visitNext && iterator.hasNext()) { visitNext = visitor.visitNode(iterator.next()); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java index c3fa547b327..ba38097cc09 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java @@ -58,8 +58,8 @@ public class BoundedCompletionService { public BoundedCompletionService(Executor executor, int maxTasks) { this.executor = executor; - this.tasks = new ArrayList>(maxTasks); - this.completed = new ArrayBlockingQueue>(maxTasks); + this.tasks = new ArrayList<>(maxTasks); + this.completed = new ArrayBlockingQueue<>(maxTasks); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java index 7de1b1375a8..9248b4125a5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java @@ -49,7 +49,7 @@ public class ByteRangeUtils { public static ArrayList copyToNewArrays(Collection ranges) { if (ranges == null) { - return new ArrayList(0); + return new ArrayList<>(0); } ArrayList arrays = Lists.newArrayListWithCapacity(ranges.size()); for (ByteRange range : ranges) { @@ -60,7 +60,7 @@ public class ByteRangeUtils { public static ArrayList fromArrays(Collection arrays) { if (arrays == null) { - return new ArrayList(0); + return new ArrayList<>(0); } ArrayList 
ranges = Lists.newArrayListWithCapacity(arrays.size()); for (byte[] array : arrays) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java index 8cc71a3151d..1470d5c0fb1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java @@ -34,8 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public class CollectionUtils { - private static final List EMPTY_LIST = Collections.unmodifiableList( - new ArrayList(0)); + private static final List EMPTY_LIST = Collections.unmodifiableList(new ArrayList<>(0)); @SuppressWarnings("unchecked") diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java index 0f00132dec0..ba54f9d49cb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; */ @InterfaceAudience.Private public class ConcatenatedLists implements Collection { - protected final ArrayList> components = new ArrayList>(); + protected final ArrayList> components = new ArrayList<>(); protected int size = 0; public void addAllSublists(List> items) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java index c3635cbd49f..bc51440ab53 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java @@ -130,13 +130,13 @@ public class CoprocessorClassLoader extends ClassLoaderBase { /** * A locker used to synchronize class loader initialization per coprocessor jar file */ - private static final KeyLocker locker = new KeyLocker(); + private static final KeyLocker locker = new KeyLocker<>(); /** * A set used to synchronized parent path clean up. Generally, there * should be only one parent path, but using a set so that we can support more. */ - static final HashSet parentDirLockSet = new HashSet(); + static final HashSet parentDirLockSet = new HashSet<>(); /** * Creates a JarClassLoader that loads classes from the given paths. 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java index 0d3a5c64230..36ca7adc487 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Counter.java @@ -94,7 +94,7 @@ public class Counter { } private Counter(Cell initCell) { - containerRef = new AtomicReference(new Container(initCell)); + containerRef = new AtomicReference<>(new Container(initCell)); } private static int hash() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java index 595cc5b3bc6..1a730696c45 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DynamicClassLoader.java @@ -102,7 +102,7 @@ public class DynamicClassLoader extends ClassLoaderBase { // FindBugs: Making synchronized to avoid IS2_INCONSISTENT_SYNC complaints about // remoteDirFs and jarModifiedTime being part synchronized protected. private synchronized void initTempDir(final Configuration conf) { - jarModifiedTime = new HashMap(); + jarModifiedTime = new HashMap<>(); String localDirPath = conf.get( LOCAL_DIR_KEY, DEFAULT_LOCAL_DIR) + DYNAMIC_JARS_DIR; localDir = new File(localDirPath); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IterableUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IterableUtils.java index 41e837d1f84..862da436b21 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IterableUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IterableUtils.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public class IterableUtils { private static final List EMPTY_LIST = Collections - .unmodifiableList(new ArrayList(0)); + .unmodifiableList(new ArrayList<>(0)); @SuppressWarnings("unchecked") public static Iterable nullSafe(Iterable in) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java index 05bd66d4dfd..6acf58426b9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java @@ -49,7 +49,7 @@ public class KeyLocker { private static final int NB_CONCURRENT_LOCKS = 1000; private final WeakObjectPool lockPool = - new WeakObjectPool( + new WeakObjectPool<>( new WeakObjectPool.ObjectFactory() { @Override public ReentrantLock createObject(K key) { @@ -85,7 +85,7 @@ public class KeyLocker { Arrays.sort(keyArray); lockPool.purge(); - Map locks = new LinkedHashMap(keyArray.length); + Map locks = new LinkedHashMap<>(keyArray.length); for (Object o : keyArray) { @SuppressWarnings("unchecked") K key = (K)o; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java index 159924f821a..719d1ee07a9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java @@ -64,7 +64,7 @@ public class Pair implements Serializable * @return a new pair containing the passed arguments */ public static Pair newPair(T1 a, T2 b) { - return new Pair(a, b); + return new Pair<>(a, b); } 
/** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java index 21b376c606f..279ce95efab 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java @@ -191,7 +191,7 @@ public class Threads { ThreadFactory threadFactory) { ThreadPoolExecutor boundedCachedThreadPool = new ThreadPoolExecutor(maxCachedThread, maxCachedThread, timeout, - unit, new LinkedBlockingQueue(), threadFactory); + unit, new LinkedBlockingQueue<>(), threadFactory); // allow the core pool threads timeout and terminate boundedCachedThreadPool.allowCoreThreadTimeOut(true); return boundedCachedThreadPool; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java index 1de6bee54b7..9ee0ab5117a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java @@ -41,7 +41,7 @@ public class Triple { // ctor cannot infer types w/o warning but a method can. public static Triple create(A first, B second, C third) { - return new Triple(first, second, third); + return new Triple<>(first, second, third); } public int hashCode() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WeakObjectPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WeakObjectPool.java index 7757c6caf4d..478864b338b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WeakObjectPool.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WeakObjectPool.java @@ -50,7 +50,7 @@ public class WeakObjectPool { V createObject(K key); } - private final ReferenceQueue staleRefQueue = new ReferenceQueue(); + private final ReferenceQueue staleRefQueue = new ReferenceQueue<>(); private class ObjectReference extends WeakReference { final K key; @@ -126,8 +126,7 @@ public class WeakObjectPool { } this.objectFactory = objectFactory; - this.referenceCache = new ConcurrentHashMap( - initialCapacity, 0.75f, concurrencyLevel); + this.referenceCache = new ConcurrentHashMap<>(initialCapacity, 0.75f, concurrencyLevel); // 0.75f is the default load factor threshold of ConcurrentHashMap. 
} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java index c73705a22c5..87d56a9355f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java @@ -171,7 +171,7 @@ public class RedundantKVGenerator { private List generateRows() { // generate prefixes - List prefixes = new ArrayList(); + List prefixes = new ArrayList<>(); prefixes.add(new byte[0]); for (int i = 1; i < numberOfRowPrefixes; ++i) { int prefixLength = averagePrefixLength; @@ -184,7 +184,7 @@ public class RedundantKVGenerator { } // generate rest of the row - List rows = new ArrayList(); + List rows = new ArrayList<>(); for (int i = 0; i < numberOfRows; ++i) { int suffixLength = averageSuffixLength; suffixLength += randomizer.nextInt(2 * suffixLengthVariance + 1) - @@ -213,10 +213,10 @@ public class RedundantKVGenerator { * @return sorted list of key values */ public List generateTestKeyValues(int howMany, boolean useTags) { - List result = new ArrayList(); + List result = new ArrayList<>(); List rows = generateRows(); - Map> rowsToQualifier = new HashMap>(); + Map> rowsToQualifier = new HashMap<>(); if(family==null){ family = new byte[columnFamilyLength]; @@ -249,7 +249,7 @@ public class RedundantKVGenerator { // add it to map if (!rowsToQualifier.containsKey(rowId)) { - rowsToQualifier.put(rowId, new ArrayList()); + rowsToQualifier.put(rowId, new ArrayList<>()); } rowsToQualifier.get(rowId).add(qualifier); } else if (qualifierChance > chanceForSameQualifier) { @@ -299,9 +299,9 @@ public class RedundantKVGenerator { * @return sorted list of key values */ public List generateTestExtendedOffheapKeyValues(int howMany, boolean useTags) { - List result = new ArrayList(); + List result = new ArrayList<>(); List rows = generateRows(); - Map> rowsToQualifier = new HashMap>(); + Map> rowsToQualifier = new HashMap<>(); if (family == null) { family = new byte[columnFamilyLength]; @@ -334,7 +334,7 @@ public class RedundantKVGenerator { // add it to map if (!rowsToQualifier.containsKey(rowId)) { - rowsToQualifier.put(rowId, new ArrayList()); + rowsToQualifier.put(rowId, new ArrayList<>()); } rowsToQualifier.get(rowId).add(qualifier); } else if (qualifierChance > chanceForSameQualifier) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java index 01d387c5fb8..0aa30eed6d1 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java @@ -148,8 +148,8 @@ public class ClassFinder { final Pattern jarResourceRe = Pattern.compile("^file:(.+\\.jar)!/" + path + "$"); Enumeration resources = ClassLoader.getSystemClassLoader().getResources(path); - List dirs = new ArrayList(); - List jars = new ArrayList(); + List dirs = new ArrayList<>(); + List jars = new ArrayList<>(); while (resources.hasMoreElements()) { URL resource = resources.nextElement(); @@ -168,7 +168,7 @@ public class ClassFinder { } } - Set> classes = new HashSet>(); + Set> classes = new HashSet<>(); for (File directory : dirs) { classes.addAll(findClassesFromFiles(directory, packageName, proceedOnExceptions)); } @@ -189,7 +189,7 @@ public class ClassFinder { throw ioEx; } - Set> classes = new HashSet>(); + Set> classes = new 
HashSet<>(); JarEntry entry = null; try { while (true) { @@ -236,7 +236,7 @@ public class ClassFinder { private Set> findClassesFromFiles(File baseDirectory, String packageName, boolean proceedOnExceptions) throws ClassNotFoundException, LinkageError { - Set> classes = new HashSet>(); + Set> classes = new HashSet<>(); if (!baseDirectory.exists()) { LOG.warn(baseDirectory.getAbsolutePath() + " does not exist"); return classes; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java index 310a2fb5ab4..c0b98362c53 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java @@ -94,7 +94,7 @@ public class ResourceChecker { public List getStringsToLog() { return null; } } - private List ras = new ArrayList(); + private List ras = new ArrayList<>(); private int[] initialValues; private int[] endingValues; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java index 6264a5ef410..751b9e3c89f 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java @@ -41,10 +41,10 @@ import org.junit.runner.notification.RunListener; * When surefire forkMode=once/always/perthread, this code is executed on the forked process. */ public class ResourceCheckerJUnitListener extends RunListener { - private Map rcs = new ConcurrentHashMap(); + private Map rcs = new ConcurrentHashMap<>(); static class ThreadResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - private static Set initialThreadNames = new HashSet(); + private static Set initialThreadNames = new HashSet<>(); private static List stringsToLog = null; @Override @@ -57,7 +57,7 @@ public class ResourceCheckerJUnitListener extends RunListener { } } else if (phase == Phase.END) { if (stackTraces.size() > initialThreadNames.size()) { - stringsToLog = new ArrayList(); + stringsToLog = new ArrayList<>(); for (Thread t : stackTraces.keySet()) { if (!initialThreadNames.contains(t.getName())) { stringsToLog.add("\nPotentially hanging thread: " + t.getName() + "\n"); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java index 41a011dc176..441d1b50bda 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java @@ -215,13 +215,13 @@ public class TestCellUtil { consume(doCreateCellArray(1), 1); consume(doCreateCellArray(0), 0); consume(doCreateCellArray(3), 3); - List cells = new ArrayList(hundredK); + List cells = new ArrayList<>(hundredK); for (int i = 0; i < hundredK; i++) { cells.add(new TestCellScannable(1)); } consume(CellUtil.createCellScanner(cells), hundredK * 1); - NavigableMap> m = new TreeMap>(Bytes.BYTES_COMPARATOR); - List cellArray = new ArrayList(hundredK); + NavigableMap> m = new TreeMap<>(Bytes.BYTES_COMPARATOR); + List cellArray = new ArrayList<>(hundredK); for (int i = 0; i < hundredK; i++) cellArray.add(new TestCell(i)); m.put(new byte [] {'f'}, cellArray); consume(CellUtil.createCellScanner(m), hundredK * 1); @@ -237,7 +237,7 @@ public class TestCellUtil { private CellScanner 
doCreateCellScanner(final int listsCount, final int itemsPerList) throws IOException { - List cells = new ArrayList(listsCount); + List cells = new ArrayList<>(listsCount); for (int i = 0; i < listsCount; i++) { CellScannable cs = new CellScannable() { @Override diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java index 515481088ea..244c26744f9 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java @@ -381,7 +381,7 @@ public class TestClassFinder { // Directory entries for all packages have to be added explicitly for // resources to be findable via ClassLoader. Directory entries must end // with "/"; the initial one is expected to, also. - Set pathsInJar = new HashSet(); + Set pathsInJar = new HashSet<>(); for (FileAndPath fileAndPath : filesInJar) { String pathToAdd = fileAndPath.path; while (pathsInJar.add(pathToAdd)) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCompoundConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCompoundConfiguration.java index 57409b6bbe3..0a0a1d24d6c 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCompoundConfiguration.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCompoundConfiguration.java @@ -121,8 +121,7 @@ public class TestCompoundConfiguration extends TestCase { @Test public void testWithIbwMap() { - Map map = - new HashMap(); + Map map = new HashMap<>(); map.put(strToIb("B"), strToIb("2b")); map.put(strToIb("C"), strToIb("33")); map.put(strToIb("D"), strToIb("4")); @@ -162,7 +161,7 @@ public class TestCompoundConfiguration extends TestCase { @Test public void testWithStringMap() { - Map map = new HashMap(); + Map map = new HashMap<>(); map.put("B", "2b"); map.put("C", "33"); map.put("D", "4"); @@ -199,10 +198,10 @@ public class TestCompoundConfiguration extends TestCase { @Test public void testLaterConfigsOverrideEarlier() { - Map map1 = new HashMap(); + Map map1 = new HashMap<>(); map1.put("A", "2"); map1.put("D", "5"); - Map map2 = new HashMap(); + Map map2 = new HashMap<>(); String newValueForA = "3", newValueForB = "4"; map2.put("A", newValueForA); map2.put("B", newValueForB); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 4e0090d24f3..562c00848c7 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -222,7 +222,7 @@ public class TestKeyValue extends TestCase { } public void testBinaryKeys() throws Exception { - Set set = new TreeSet(CellComparator.COMPARATOR); + Set set = new TreeSet<>(CellComparator.COMPARATOR); final byte [] fam = Bytes.toBytes("col"); final byte [] qf = Bytes.toBytes("umn"); final byte [] nb = new byte[0]; @@ -248,7 +248,7 @@ public class TestKeyValue extends TestCase { } assertTrue(assertion); // Make set with good comparator - set = new TreeSet(CellComparator.META_COMPARATOR); + set = new TreeSet<>(CellComparator.META_COMPARATOR); Collections.addAll(set, keys); count = 0; for (KeyValue k: set) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java index 8775d7f7bab..eca7712dca7 100644 
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java @@ -93,7 +93,7 @@ public class TestBoundedByteBufferPool { @Test public void testBufferSizeGrowWithMultiThread() throws Exception { - final ConcurrentLinkedDeque bufferQueue = new ConcurrentLinkedDeque(); + final ConcurrentLinkedDeque bufferQueue = new ConcurrentLinkedDeque<>(); int takeBufferThreadsCount = 30; int putBufferThreadsCount = 1; Thread takeBufferThreads[] = new Thread[takeBufferThreadsCount]; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java index 5e609ad94fd..dddd9e7eef4 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestTagCompressionContext.java @@ -150,7 +150,7 @@ public class TestTagCompressionContext { } private KeyValue createKVWithTags(int noOfTags) { - List tags = new ArrayList(); + List tags = new ArrayList<>(); for (int i = 0; i < noOfTags; i++) { tags.add(new ArrayBackedTag((byte) i, "tagValue" + i)); } @@ -159,7 +159,7 @@ public class TestTagCompressionContext { } private Cell createOffheapKVWithTags(int noOfTags) { - List tags = new ArrayList(); + List tags = new ArrayList<>(); for (int i = 0; i < noOfTags; i++) { tags.add(new ArrayBackedTag((byte) i, "tagValue" + i)); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java index af4c4645237..48922d9c9ce 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/nio/TestMultiByteBuff.java @@ -243,7 +243,7 @@ public class TestMultiByteBuff { assertFalse(bb2 == sub); assertEquals(l2, ByteBufferUtils.toLong(sub, sub.position())); multi.rewind(); - ObjectIntPair p = new ObjectIntPair(); + ObjectIntPair p = new ObjectIntPair<>(); multi.asSubByteBuffer(8, Bytes.SIZEOF_LONG, p); assertFalse(bb1 == p.getFirst()); assertFalse(bb2 == p.getFirst()); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestFixedLengthWrapper.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestFixedLengthWrapper.java index b2594294363..c2c5a6db706 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestFixedLengthWrapper.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestFixedLengthWrapper.java @@ -51,7 +51,7 @@ public class TestFixedLengthWrapper { for (Order ord : new Order[] { Order.ASCENDING, Order.DESCENDING }) { for (byte[] val : VALUES) { buff.setPosition(0); - DataType type = new FixedLengthWrapper(new RawBytes(ord), limit); + DataType type = new FixedLengthWrapper<>(new RawBytes(ord), limit); assertEquals(limit, type.encode(buff, val)); buff.setPosition(0); byte[] actual = type.decode(buff); @@ -67,21 +67,21 @@ public class TestFixedLengthWrapper { @Test(expected = IllegalArgumentException.class) public void testInsufficientRemainingRead() { PositionedByteRange buff = new SimplePositionedMutableByteRange(0); - DataType type = new FixedLengthWrapper(new RawBytes(), 3); + DataType type = new FixedLengthWrapper<>(new RawBytes(), 3); type.decode(buff); } @Test(expected = IllegalArgumentException.class) public void 
testInsufficientRemainingWrite() { PositionedByteRange buff = new SimplePositionedMutableByteRange(0); - DataType type = new FixedLengthWrapper(new RawBytes(), 3); + DataType type = new FixedLengthWrapper<>(new RawBytes(), 3); type.encode(buff, Bytes.toBytes("")); } @Test(expected = IllegalArgumentException.class) public void testOverflowPassthrough() { PositionedByteRange buff = new SimplePositionedMutableByteRange(3); - DataType type = new FixedLengthWrapper(new RawBytes(), 0); + DataType type = new FixedLengthWrapper<>(new RawBytes(), 0); type.encode(buff, Bytes.toBytes("foo")); } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestStructNullExtension.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestStructNullExtension.java index e87438d9e1d..2b2efe68f73 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestStructNullExtension.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestStructNullExtension.java @@ -58,7 +58,7 @@ public class TestStructNullExtension { Struct shorter = builder.toStruct(); Struct longer = builder // intentionally include a wrapped instance to test wrapper behavior. - .add(new TerminatedWrapper(OrderedString.ASCENDING, "/")) + .add(new TerminatedWrapper<>(OrderedString.ASCENDING, "/")) .add(OrderedNumeric.ASCENDING) .toStruct(); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestTerminatedWrapper.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestTerminatedWrapper.java index e36a141f7eb..310067b2245 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestTerminatedWrapper.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/types/TestTerminatedWrapper.java @@ -47,7 +47,7 @@ public class TestTerminatedWrapper { @Test(expected = IllegalArgumentException.class) public void testEmptyDelimiter() { - new TerminatedWrapper(new RawBytes(), ""); + new TerminatedWrapper<>(new RawBytes(), ""); } @Test(expected = IllegalArgumentException.class) @@ -58,7 +58,7 @@ public class TestTerminatedWrapper { @Test(expected = IllegalArgumentException.class) public void testEncodedValueContainsTerm() { - DataType type = new TerminatedWrapper(new RawBytes(), "foo"); + DataType type = new TerminatedWrapper<>(new RawBytes(), "foo"); PositionedByteRange buff = new SimplePositionedMutableByteRange(16); type.encode(buff, Bytes.toBytes("hello foobar!")); } @@ -72,7 +72,7 @@ public class TestTerminatedWrapper { for (byte[] term : TERMINATORS) { for (String val : VALUES_STRINGS) { buff.setPosition(0); - DataType type = new TerminatedWrapper(t, term); + DataType type = new TerminatedWrapper<>(t, term); assertEquals(val.length() + 2 + term.length, type.encode(buff, val)); buff.setPosition(0); assertEquals(val, type.decode(buff)); @@ -89,7 +89,7 @@ public class TestTerminatedWrapper { for (byte[] term : TERMINATORS) { for (byte[] val : VALUES_BYTES) { buff.setPosition(0); - DataType type = new TerminatedWrapper(new RawBytes(ord), term); + DataType type = new TerminatedWrapper<>(new RawBytes(ord), term); assertEquals(val.length + term.length, type.encode(buff, val)); buff.setPosition(0); assertArrayEquals(val, type.decode(buff)); @@ -108,7 +108,7 @@ public class TestTerminatedWrapper { for (byte[] term : TERMINATORS) { for (String val : VALUES_STRINGS) { buff.setPosition(0); - DataType type = new TerminatedWrapper(t, term); + DataType type = new TerminatedWrapper<>(t, term); int expected = val.length() + 2 + term.length; assertEquals(expected, 
type.encode(buff, val)); buff.setPosition(0); @@ -126,7 +126,7 @@ public class TestTerminatedWrapper { for (byte[] term : TERMINATORS) { for (byte[] val : VALUES_BYTES) { buff.setPosition(0); - DataType type = new TerminatedWrapper(new RawBytes(ord), term); + DataType type = new TerminatedWrapper<>(new RawBytes(ord), term); int expected = type.encode(buff, val); buff.setPosition(0); assertEquals(expected, type.skip(buff)); @@ -139,7 +139,7 @@ public class TestTerminatedWrapper { @Test(expected = IllegalArgumentException.class) public void testInvalidSkip() { PositionedByteRange buff = new SimplePositionedMutableByteRange(Bytes.toBytes("foo")); - DataType type = new TerminatedWrapper(new RawBytes(), new byte[] { 0x00 }); + DataType type = new TerminatedWrapper<>(new RawBytes(), new byte[] { 0x00 }); type.skip(buff); } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java index 30e33d937fd..ba6cea07144 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java @@ -133,13 +133,13 @@ public class ClassLoaderTestHelper { // compile it by JavaCompiler JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); - ArrayList srcFileNames = new ArrayList(1); + ArrayList srcFileNames = new ArrayList<>(1); srcFileNames.add(sourceCodeFile.toString()); StandardJavaFileManager fm = compiler.getStandardFileManager(null, null, null); Iterable cu = fm.getJavaFileObjects(sourceCodeFile); - List options = new ArrayList(2); + List options = new ArrayList<>(2); options.add("-classpath"); // only add hbase classes to classpath. This is a little bit tricky: assume // the classpath is {hbaseSrc}/target/classes. 
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAvlUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAvlUtil.java
index 3c7b680e703..554e10803af 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAvlUtil.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAvlUtil.java
@@ -48,7 +48,7 @@ public class TestAvlUtil {
     final int MAX_KEY = 99999999;
     final int NELEM = 10000;
-    final TreeMap treeMap = new TreeMap();
+    final TreeMap treeMap = new TreeMap<>();
     TestAvlNode root = null;
     final Random rand = new Random();
@@ -117,7 +117,7 @@ public class TestAvlUtil {
       root = AvlTree.insert(root, new TestAvlNode(i));
     }
-    AvlTreeIterator iter = new AvlTreeIterator(root);
+    AvlTreeIterator iter = new AvlTreeIterator<>(root);
     assertTrue(iter.hasNext());
     long prevKey = 0;
     while (iter.hasNext()) {
@@ -139,7 +139,7 @@ public class TestAvlUtil {
     }
     for (int i = MIN_KEY - 1; i < MAX_KEY + 1; ++i) {
-      AvlTreeIterator iter = new AvlTreeIterator(root, i, KEY_COMPARATOR);
+      AvlTreeIterator iter = new AvlTreeIterator<>(root, i, KEY_COMPARATOR);
       if (i < MAX_KEY) {
         assertTrue(iter.hasNext());
       } else {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java
index 09ef707b58a..7c74bca7f96 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBase64.java
@@ -55,7 +55,7 @@ public class TestBase64 extends TestCase {
    * @throws UnsupportedEncodingException
    */
   public void testBase64() throws UnsupportedEncodingException {
-    TreeMap sorted = new TreeMap();
+    TreeMap sorted = new TreeMap<>();
     for (int i = 0; i < uris.length; i++) {
       byte[] bytes = uris[i].getBytes("UTF-8");
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
index 2cc37511d12..6d9c49678ff 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBoundedArrayQueue.java
@@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category;
 public class TestBoundedArrayQueue {
   private int qMaxElements = 5;
-  private BoundedArrayQueue queue = new BoundedArrayQueue(qMaxElements);
+  private BoundedArrayQueue queue = new BoundedArrayQueue<>(qMaxElements);
   @Test
   public void testBoundedArrayQueueOperations() throws Exception {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java
index e94293c3d1b..b78574a36a0 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferUtils.java
@@ -77,7 +77,7 @@ public class TestByteBufferUtils {
   }
   static {
-    SortedSet a = new TreeSet();
+    SortedSet a = new TreeSet<>();
     for (int i = 0; i <= 63; ++i) {
       long v = (-1L) << i;
       assertTrue(v < 0);
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
index 717e24c179b..8ae2a29b7d0 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteRangeWithKVSerialization.java
@@ -64,7 +64,7 @@ public class TestByteRangeWithKVSerialization {
     final byte[] QUALIFIER = Bytes.toBytes("q1");
     final byte[] VALUE = Bytes.toBytes("v");
     int kvCount = 1000000;
-    List kvs = new ArrayList(kvCount);
+    List kvs = new ArrayList<>(kvCount);
     int totalSize = 0;
     Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 1, "tag1") };
     for (int i = 0; i < kvCount; i++) {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
index e145642edc9..38b01b82c45 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
@@ -498,7 +498,7 @@ public class TestBytes extends TestCase {
   }
   public void testToFromHex() {
-    List testStrings = new ArrayList(8);
+    List testStrings = new ArrayList<>(8);
     testStrings.addAll(Arrays.asList(new String[] {
       "",
       "00",
@@ -517,7 +517,7 @@ public class TestBytes extends TestCase {
       Assert.assertTrue(testString.equalsIgnoreCase(result));
     }
-    List testByteData = new ArrayList(5);
+    List testByteData = new ArrayList<>(5);
     testByteData.addAll(Arrays.asList(new byte[][] {
       new byte[0],
       new byte[1],
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestConcatenatedLists.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestConcatenatedLists.java
index fd4baf58a99..cfd288d8da7 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestConcatenatedLists.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestConcatenatedLists.java
@@ -39,7 +39,7 @@ public class TestConcatenatedLists {
   @Test
   public void testUnsupportedOps() {
     // If adding support, add tests.
-    ConcatenatedLists c = new ConcatenatedLists();
+    ConcatenatedLists c = new ConcatenatedLists<>();
     c.addSublist(Arrays.asList(0L, 1L));
     try {
       c.add(2L);
@@ -82,19 +82,19 @@ public class TestConcatenatedLists {
   @Test
   public void testEmpty() {
-    verify(new ConcatenatedLists(), -1);
+    verify(new ConcatenatedLists<>(), -1);
   }
   @Test
   public void testOneOne() {
-    ConcatenatedLists c = new ConcatenatedLists();
+    ConcatenatedLists c = new ConcatenatedLists<>();
     c.addSublist(Arrays.asList(0L));
     verify(c, 0);
   }
   @Test
   public void testOneMany() {
-    ConcatenatedLists c = new ConcatenatedLists();
+    ConcatenatedLists c = new ConcatenatedLists<>();
     c.addSublist(Arrays.asList(0L, 1L, 2L));
     verify(c, 2);
   }
@@ -102,7 +102,7 @@ public class TestConcatenatedLists {
   @Test
   @SuppressWarnings("unchecked")
   public void testManyOne() {
-    ConcatenatedLists c = new ConcatenatedLists();
+    ConcatenatedLists c = new ConcatenatedLists<>();
     c.addSublist(Arrays.asList(0L));
     c.addAllSublists(Arrays.asList(Arrays.asList(1L), Arrays.asList(2L)));
     verify(c, 2);
@@ -111,7 +111,7 @@ public class TestConcatenatedLists {
   @Test
   @SuppressWarnings("unchecked")
   public void testManyMany() {
-    ConcatenatedLists c = new ConcatenatedLists();
+    ConcatenatedLists c = new ConcatenatedLists<>();
     c.addAllSublists(Arrays.asList(Arrays.asList(0L, 1L)));
     c.addSublist(Arrays.asList(2L, 3L, 4L));
     c.addAllSublists(Arrays.asList(Arrays.asList(5L), Arrays.asList(6L, 7L)));
@@ -123,7 +123,7 @@ public class TestConcatenatedLists {
     assertEquals(last + 1, c.size());
     assertTrue(c.containsAll(c));
     Long[] array = c.toArray(new Long[c.size()]);
-    List all = new ArrayList();
+    List all = new ArrayList<>();
     Iterator iter = c.iterator();
     for (Long i = 0L; i <= last; ++i) {
       assertTrue(iter.hasNext());
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestKeyLocker.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestKeyLocker.java
index 40b918c4401..edf2f78eba9 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestKeyLocker.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestKeyLocker.java
@@ -30,7 +30,7 @@ import org.junit.experimental.categories.Category;
 public class TestKeyLocker {
   @Test
   public void testLocker(){
-    KeyLocker locker = new KeyLocker();
+    KeyLocker locker = new KeyLocker<>();
     ReentrantLock lock1 = locker.acquireLock("l1");
     Assert.assertTrue(lock1.isHeldByCurrentThread());
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestLoadTestKVGenerator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestLoadTestKVGenerator.java
index 120f2b66ca8..cf74a3ed94f 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestLoadTestKVGenerator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestLoadTestKVGenerator.java
@@ -64,7 +64,7 @@ public class TestLoadTestKVGenerator {
   @Test
   public void testCorrectAndUniqueKeys() {
-    Set keys = new HashSet();
+    Set keys = new HashSet<>();
     for (int i = 0; i < 1000; ++i) {
       String k = LoadTestKVGenerator.md5PrefixedKey(i);
       assertFalse(keys.contains(k));
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
index bf1b4eb6294..d9fefa2f9eb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestWeakObjectPool.java
@@ -35,7 +35,7 @@ public class TestWeakObjectPool {
   @Before
public void setUp() { - pool = new WeakObjectPool( + pool = new WeakObjectPool<>( new WeakObjectPool.ObjectFactory() { @Override public Object createObject(String key) { @@ -94,7 +94,7 @@ public class TestWeakObjectPool { final int THREAD_COUNT = 100; final AtomicBoolean assertionFailed = new AtomicBoolean(); - final AtomicReference expectedObjRef = new AtomicReference(); + final AtomicReference expectedObjRef = new AtomicReference<>(); final CountDownLatch prepareLatch = new CountDownLatch(THREAD_COUNT); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch endLatch = new CountDownLatch(THREAD_COUNT); diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 304722ec39f..810778b8529 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -208,7 +208,7 @@ public class AggregationClient implements Closeable { public R call(AggregateService instance) throws IOException { RpcController controller = new AggregationClientRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getMax(controller, requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { @@ -280,7 +280,7 @@ public class AggregationClient implements Closeable { public R call(AggregateService instance) throws IOException { RpcController controller = new AggregationClientRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getMin(controller, requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { @@ -355,7 +355,7 @@ public class AggregationClient implements Closeable { public Long call(AggregateService instance) throws IOException { RpcController controller = new AggregationClientRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getRowNum(controller, requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { @@ -421,7 +421,7 @@ public class AggregationClient implements Closeable { RpcController controller = new AggregationClientRpcController(); // Not sure what is going on here why I have to do these casts. TODO. 
CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getSum(controller, requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { @@ -472,7 +472,7 @@ public class AggregationClient implements Closeable { Long rowCount = 0l; public synchronized Pair getAvgArgs() { - return new Pair(sum, rowCount); + return new Pair<>(sum, rowCount); } @Override @@ -488,13 +488,13 @@ public class AggregationClient implements Closeable { public Pair call(AggregateService instance) throws IOException { RpcController controller = new AggregationClientRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getAvg(controller, requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { throw new IOException(controller.errorText()); } - Pair pair = new Pair(null, 0L); + Pair pair = new Pair<>(null, 0L); if (response.getFirstPartCount() == 0) { return pair; } @@ -569,10 +569,10 @@ public class AggregationClient implements Closeable { S sumVal = null, sumSqVal = null; public synchronized Pair, Long> getStdParams() { - List l = new ArrayList(2); + List l = new ArrayList<>(2); l.add(sumVal); l.add(sumSqVal); - Pair, Long> p = new Pair, Long>(l, rowCountVal); + Pair, Long> p = new Pair<>(l, rowCountVal); return p; } @@ -592,17 +592,17 @@ public class AggregationClient implements Closeable { public Pair, Long> call(AggregateService instance) throws IOException { RpcController controller = new AggregationClientRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getStd(controller, requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { throw new IOException(controller.errorText()); } - Pair, Long> pair = new Pair, Long>(new ArrayList(), 0L); + Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); if (response.getFirstPartCount() == 0) { return pair; } - List list = new ArrayList(); + List list = new ArrayList<>(); for (int i = 0; i < response.getFirstPartCount(); i++) { ByteString b = response.getFirstPart(i); T t = getParsedGenericInstance(ci.getClass(), 4, b); @@ -680,17 +680,15 @@ public class AggregationClient implements Closeable { getMedianArgs(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); - final NavigableMap> map = - new TreeMap>(Bytes.BYTES_COMPARATOR); + final NavigableMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); class StdCallback implements Batch.Callback> { S sumVal = null, sumWeights = null; public synchronized Pair>, List> getMedianParams() { - List l = new ArrayList(2); + List l = new ArrayList<>(2); l.add(sumVal); l.add(sumWeights); - Pair>, List> p = - new Pair>, List>(map, l); + Pair>, List> p = new Pair<>(map, l); return p; } @@ -708,14 +706,14 @@ public class AggregationClient implements Closeable { public List call(AggregateService instance) throws IOException { RpcController controller = new AggregationClientRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.getMedian(controller, 
requestArg, rpcCallback); AggregateResponse response = rpcCallback.get(); if (controller.failed()) { throw new IOException(controller.errorText()); } - List list = new ArrayList(); + List list = new ArrayList<>(); for (int i = 0; i < response.getFirstPartCount(); i++) { ByteString b = response.getFirstPart(i); T t = getParsedGenericInstance(ci.getClass(), 4, b); diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index bccb76a7201..3fbbd52f163 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -83,7 +83,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { T temp; Scan scan = ProtobufUtil.toScan(request.getScan()); scanner = env.getRegion().getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); byte[] colFamily = scan.getFamilies()[0]; NavigableSet qualifiers = scan.getFamilyMap().get(colFamily); byte[] qualifier = null; @@ -138,7 +138,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { T temp; Scan scan = ProtobufUtil.toScan(request.getScan()); scanner = env.getRegion().getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); byte[] colFamily = scan.getFamilies()[0]; NavigableSet qualifiers = scan.getFamilyMap().get(colFamily); byte[] qualifier = null; @@ -198,7 +198,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); } - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); @@ -237,7 +237,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { RpcCallback done) { AggregateResponse response = null; long counter = 0l; - List results = new ArrayList(); + List results = new ArrayList<>(); InternalScanner scanner = null; try { Scan scan = ProtobufUtil.toScan(request.getScan()); @@ -308,7 +308,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); } - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMoreRows = false; do { @@ -368,7 +368,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); } - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMoreRows = false; @@ -434,7 +434,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { // if weighted median is requested, get qualifier for the weight column weightQualifier = qualifiers.pollLast(); } - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMoreRows = false; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index a9d10e86360..b52e5f92b91 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ 
-84,7 +84,7 @@ implements Coprocessor, CoprocessorService { InternalScanner scanner = null; try { scanner = this.env.getRegion().getScanner(scan); - List curVals = new ArrayList(); + List curVals = new ArrayList<>(); boolean hasMore = false; do { curVals.clear(); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java index 22dac6dd3d4..54e33587ce3 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java @@ -94,7 +94,7 @@ implements Coprocessor, CoprocessorService { return; } scanner = region.getScanner(scan); - List curVals = new ArrayList(); + List curVals = new ArrayList<>(); boolean hasMore = false; do { curVals.clear(); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index c75fb31237e..6e8c571d689 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -94,7 +94,7 @@ implements Coprocessor, CoprocessorService { throw new DoNotRetryIOException("An expected exception"); } scanner = region.getScanner(scan); - List curVals = new ArrayList(); + List curVals = new ArrayList<>(); boolean hasMore = false; do { curVals.clear(); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 3ed8a568739..56fdca6db40 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -168,8 +168,7 @@ public class TestClassLoading { // verify that the coprocessors were loaded boolean foundTableRegion=false; boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true; - Map> regionsActiveClassLoaders = - new HashMap>(); + Map> regionsActiveClassLoaders = new HashMap<>(); MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { @@ -209,7 +208,7 @@ public class TestClassLoading { " of external jar files", 2, CoprocessorClassLoader.getAllCached().size()); //check if region active classloaders are shared across all RS regions - Set externalClassLoaders = new HashSet( + Set externalClassLoaders = new HashSet<>( CoprocessorClassLoader.getAllCached()); for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." @@ -312,7 +311,7 @@ public class TestClassLoading { // add 2 coprocessor by using new htd.addCoprocessor() api htd.addCoprocessor(cpName5, new Path(getLocalPath(jarFile5)), Coprocessor.PRIORITY_USER, null); - Map kvs = new HashMap(); + Map kvs = new HashMap<>(); kvs.put("k1", "v1"); kvs.put("k2", "v2"); kvs.put("k3", "v3"); @@ -466,8 +465,7 @@ public class TestClassLoading { * @return subset of all servers. 
*/ Map serversForTable(String tableName) { - Map serverLoadHashMap = - new HashMap(); + Map serverLoadHashMap = new HashMap<>(); for(Map.Entry server: TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). getOnlineServers().entrySet()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index 547b7e9fa1b..adfd8d53e5e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -124,7 +124,7 @@ public class TestCoprocessorEndpoint { public Long call(ColumnAggregationProtos.ColumnAggregationService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteStringer.wrap(family)); @@ -193,7 +193,7 @@ public class TestCoprocessorEndpoint { throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call returning result " + response); @@ -226,7 +226,7 @@ public class TestCoprocessorEndpoint { throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call returning result " + response); @@ -271,7 +271,7 @@ public class TestCoprocessorEndpoint { public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.echo(controller, request, callback); TestProtos.EchoResponseProto response = callback.get(); LOG.debug("Batch.Call got result " + response); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java index 0af655a3a7c..07831319b19 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -117,7 +117,7 @@ public class TestCoprocessorTableEndpoint { public Long call(ColumnAggregationProtos.ColumnAggregationService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); diff --git 
a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java index 69742a6f7e8..9dc4822168b 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java @@ -73,8 +73,7 @@ public class TestRegionServerCoprocessorEndpoint { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, TEST_UTIL.getAdmin().coprocessorService(serverName)); @@ -91,8 +90,7 @@ public class TestRegionServerCoprocessorEndpoint { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, TEST_UTIL.getAdmin().coprocessorService(serverName)); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java index 07d20421d0c..ed5302735f7 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java @@ -166,10 +166,9 @@ public class TestRowProcessorEndpoint { ProcessResponse protoResult = service.process(null, request); FriendsOfFriendsProcessorResponse response = FriendsOfFriendsProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); - Set result = new HashSet(); + Set result = new HashSet<>(); result.addAll(response.getResultList()); - Set expected = - new HashSet(Arrays.asList(new String[]{"d", "e", "f", "g"})); + Set expected = new HashSet<>(Arrays.asList(new String[]{"d", "e", "f", "g"})); Get get = new Get(ROW); LOG.debug("row keyvalues:" + stringifyKvs(table.get(get).listCells())); assertEquals(expected, result); @@ -349,7 +348,7 @@ public class TestRowProcessorEndpoint { public void process(long now, HRegion region, List mutations, WALEdit walEdit) throws IOException { // Scan current counter - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); Scan scan = new Scan(row, row); scan.addColumn(FAM, COUNTER); doScan(region, scan, kvs); @@ -398,7 +397,7 @@ public class TestRowProcessorEndpoint { BaseRowProcessor { byte[] row = null; byte[] person = null; - final Set result = new HashSet(); + final Set result = new HashSet<>(); /** * Empty constructor for Writable @@ -432,7 +431,7 @@ public class TestRowProcessorEndpoint { @Override public void process(long now, HRegion region, List mutations, WALEdit walEdit) throws IOException { - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); { 
// First scan to get friends of the person Scan scan = new Scan(row, row); scan.addColumn(FAM, person); @@ -497,7 +496,7 @@ public class TestRowProcessorEndpoint { @Override public Collection getRowsToLock() { - List rows = new ArrayList(2); + List rows = new ArrayList<>(2); rows.add(row1); rows.add(row2); return rows; @@ -522,8 +521,8 @@ public class TestRowProcessorEndpoint { now = myTimer.getAndIncrement(); // Scan both rows - List kvs1 = new ArrayList(); - List kvs2 = new ArrayList(); + List kvs1 = new ArrayList<>(); + List kvs2 = new ArrayList<>(); doScan(region, new Scan(row1, row1), kvs1); doScan(region, new Scan(row2, row2), kvs2); @@ -538,7 +537,7 @@ public class TestRowProcessorEndpoint { swapped = !swapped; // Add and delete keyvalues - List> kvs = new ArrayList>(2); + List> kvs = new ArrayList<>(2); kvs.add(kvs1); kvs.add(kvs2); byte[][] rows = new byte[][]{row1, row2}; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java index 06e45eb7b7f..323999d59af 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java @@ -64,7 +64,7 @@ public class SecureBulkLoadEndpointClient { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); PrepareBulkLoadRequest request = PrepareBulkLoadRequest.newBuilder() @@ -92,7 +92,7 @@ public class SecureBulkLoadEndpointClient { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); CleanupBulkLoadRequest request = CleanupBulkLoadRequest.newBuilder() @@ -133,7 +133,7 @@ public class SecureBulkLoadEndpointClient { } List protoFamilyPaths = - new ArrayList(familyPaths.size()); + new ArrayList<>(familyPaths.size()); for(Pair el: familyPaths) { protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder() .setFamily(ByteStringer.wrap(el.getFirst())) @@ -148,8 +148,7 @@ public class SecureBulkLoadEndpointClient { ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.secureBulkLoadHFiles(controller, request, rpcCallback); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java index c0d2719f4a2..10a4d19c5af 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java @@ -92,8 +92,7 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); byte[] val = Bytes.toBytes(String.format("%010d", iteration)); - final List> famPaths = new 
ArrayList>( - NUM_CFS); + final List> famPaths = new ArrayList<>(NUM_CFS); for (int i = 0; i < NUM_CFS; i++) { Path hfile = new Path(dir, family(i)); byte[] fam = Bytes.toBytes(family(i)); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index 9bff701c396..83c7dbfded5 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -198,7 +198,7 @@ public class TestServerCustomProtocol { @Override public Integer call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.count(null, PingProtos.CountRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getCount(); } @@ -215,7 +215,7 @@ public class TestServerCustomProtocol { @Override public Integer call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.increment(null, PingProtos.IncrementCountRequest.newBuilder().setDiff(diff).build(), rpcCallback); @@ -253,7 +253,7 @@ public class TestServerCustomProtocol { @Override public String call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); if (send != null) builder.setName(send); instance.hello(null, builder.build(), rpcCallback); @@ -272,7 +272,7 @@ public class TestServerCustomProtocol { @Override public String call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); // Call ping on same instance. Use result calling hello on same instance. 
builder.setName(doPing(instance)); @@ -291,7 +291,7 @@ public class TestServerCustomProtocol { @Override public String call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); PingProtos.NoopRequest.Builder builder = PingProtos.NoopRequest.newBuilder(); instance.noop(null, builder.build(), rpcCallback); rpcCallback.get(); @@ -311,7 +311,7 @@ public class TestServerCustomProtocol { @Override public String call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getPong(); } @@ -406,7 +406,7 @@ public class TestServerCustomProtocol { private static String doPing(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getPong(); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java index f54c6325d18..75f8ee20a27 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java @@ -71,7 +71,7 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication * Prepare 16 random hfile ranges required for creating hfiles */ Iterator randomHFileRangeListIterator = null; - Set randomHFileRanges = new HashSet(16); + Set randomHFileRanges = new HashSet<>(16); for (int i = 0; i < 16; i++) { randomHFileRanges.add(UUID.randomUUID().toString()); } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java index 7e6c290d2b2..79ff25b9908 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java @@ -133,9 +133,9 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements Coprocessor // filter and having necessary column(s). 
scanner = region.getScanner(scan); while (hasMore) { - List> deleteRows = new ArrayList>(rowBatchSize); + List> deleteRows = new ArrayList<>(rowBatchSize); for (int i = 0; i < rowBatchSize; i++) { - List results = new ArrayList(); + List results = new ArrayList<>(); hasMore = scanner.next(results); if (results.size() > 0) { deleteRows.add(results); @@ -202,14 +202,14 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements Coprocessor byte[] row = CellUtil.cloneRow(deleteRow.get(0)); Delete delete = new Delete(row, ts); if (deleteType == DeleteType.FAMILY) { - Set families = new TreeSet(Bytes.BYTES_COMPARATOR); + Set families = new TreeSet<>(Bytes.BYTES_COMPARATOR); for (Cell kv : deleteRow) { if (families.add(CellUtil.cloneFamily(kv))) { delete.addFamily(CellUtil.cloneFamily(kv), ts); } } } else if (deleteType == DeleteType.COLUMN) { - Set columns = new HashSet(); + Set columns = new HashSet<>(); for (Cell kv : deleteRow) { Column column = new Column(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)); if (columns.add(column)) { @@ -231,7 +231,7 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements Coprocessor noOfVersionsToDelete++; } } else { - Set columns = new HashSet(); + Set columns = new HashSet<>(); for (Cell kv : deleteRow) { Column column = new Column(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)); // Only one version of particular column getting deleted. diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java index c2387c5291a..36d848831fe 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java @@ -75,7 +75,7 @@ public class RowCountEndpoint extends ExampleProtos.RowCountService InternalScanner scanner = null; try { scanner = env.getRegion().getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMore = false; byte[] lastRow = null; long count = 0; @@ -115,7 +115,7 @@ public class RowCountEndpoint extends ExampleProtos.RowCountService InternalScanner scanner = null; try { scanner = env.getRegion().getScanner(new Scan()); - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMore = false; long count = 0; do { diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java index 1dab6331f26..01e9ef37cc5 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java @@ -103,7 +103,7 @@ public class IndexBuilder extends Configured implements Tool { String[] fields = configuration.getStrings("index.fields"); String familyName = configuration.get("index.familyname"); family = Bytes.toBytes(familyName); - indexes = new TreeMap(Bytes.BYTES_COMPARATOR); + indexes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(String field : fields) { // if the table is "people" and the field to index is "email", then the // index table will be called "people-email" diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java index b16ef7b4673..cb0cfbbdb22 100644 --- 
a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java @@ -114,7 +114,7 @@ public class DemoClient { private void run() throws Exception { TTransport transport = new TSocket(host, port); if (secure) { - Map saslProperties = new HashMap(); + Map saslProperties = new HashMap<>(); saslProperties.put(Sasl.QOP, "auth-conf,auth-int,auth"); /** * The Thrift server the DemoClient is trying to connect to @@ -154,7 +154,7 @@ public class DemoClient { // // Create the demo table with two column families, entry: and unused: // - ArrayList columns = new ArrayList(2); + ArrayList columns = new ArrayList<>(2); ColumnDescriptor col; col = new ColumnDescriptor(); col.name = ByteBuffer.wrap(bytes("entry:")); @@ -194,7 +194,7 @@ public class DemoClient { ArrayList mutations; // non-utf8 is fine for data - mutations = new ArrayList(1); + mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")), @@ -202,19 +202,19 @@ public class DemoClient { // this row name is valid utf8 - mutations = new ArrayList(1); + mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), writeToWal)); client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes); // non-utf8 is now allowed in row names because HBase stores values as binary - mutations = new ArrayList(1); + mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, dummyAttributes); // Run a scanner on the rows we just created - ArrayList columnNames = new ArrayList(); + ArrayList columnNames = new ArrayList<>(); columnNames.add(ByteBuffer.wrap(bytes("entry:"))); System.out.println("Starting scanner..."); @@ -238,7 +238,7 @@ public class DemoClient { nf.setGroupingUsed(false); byte[] row = bytes(nf.format(i)); - mutations = new ArrayList(1); + mutations = new ArrayList<>(1); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")), ByteBuffer.wrap(bytes("DELETE_ME")), writeToWal)); client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); @@ -251,14 +251,14 @@ public class DemoClient { // no-op } - mutations = new ArrayList(2); + mutations = new ArrayList<>(2); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes("0")), writeToWal)); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(bytes("FOO")), writeToWal)); client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); Mutation m; - mutations = new ArrayList(2); + mutations = new ArrayList<>(2); m = new Mutation(); m.column = ByteBuffer.wrap(bytes("entry:foo")); m.isDelete = true; @@ -270,7 +270,7 @@ public class DemoClient { client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - mutations = new ArrayList(); + mutations = new ArrayList<>(); mutations.add(new Mutation(false, 
ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes(Integer.toString(i))), writeToWal)); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:sqr")), ByteBuffer.wrap(bytes(Integer.toString(i * i))), writeToWal)); client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); @@ -347,7 +347,7 @@ public class DemoClient { private void printRow(TRowResult rowResult) { // copy values into a TreeMap to get them in sorted order - TreeMap sorted = new TreeMap(); + TreeMap sorted = new TreeMap<>(); for (Map.Entry column : rowResult.columns.entrySet()) { sorted.put(utf8(column.getKey().array()), column.getValue()); } @@ -379,7 +379,7 @@ public class DemoClient { new Configuration() { @Override public AppConfigurationEntry[] getAppConfigurationEntry(String name) { - Map options = new HashMap(); + Map options = new HashMap<>(); options.put("useKeyTab", "false"); options.put("storeKey", "false"); options.put("doNotPrompt", "true"); diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java index 666891c6e82..25fdc4af340 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java @@ -151,7 +151,7 @@ public class HttpDoAsClient { // // Create the demo table with two column families, entry: and unused: // - ArrayList columns = new ArrayList(2); + ArrayList columns = new ArrayList<>(2); ColumnDescriptor col; col = new ColumnDescriptor(); col.name = ByteBuffer.wrap(bytes("entry:")); @@ -236,7 +236,7 @@ public class HttpDoAsClient { private void printRow(TRowResult rowResult) { // copy values into a TreeMap to get them in sorted order - TreeMap sorted = new TreeMap(); + TreeMap sorted = new TreeMap<>(); for (Map.Entry column : rowResult.columns.entrySet()) { sorted.put(utf8(column.getKey().array()), column.getValue()); } @@ -261,7 +261,7 @@ public class HttpDoAsClient { new Configuration() { @Override public AppConfigurationEntry[] getAppConfigurationEntry(String name) { - Map options = new HashMap(); + Map options = new HashMap<>(); options.put("useKeyTab", "false"); options.put("storeKey", "false"); options.put("doNotPrompt", "true"); diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java index 408379287a0..666997e9787 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java @@ -102,7 +102,7 @@ public class DemoClient { * * The HBase cluster must be secure, allow proxy user. */ - Map saslProperties = new HashMap(); + Map saslProperties = new HashMap<>(); saslProperties.put(Sasl.QOP, "auth-conf,auth-int,auth"); transport = new TSaslClientTransport("GSSAPI", null, user != null ? 
user : "hbase",// Thrift server user name, should be an authorized proxy user @@ -126,7 +126,7 @@ public class DemoClient { columnValue.setFamily("family1".getBytes()); columnValue.setQualifier("qualifier1".getBytes()); columnValue.setValue("value1".getBytes()); - List columnValues = new ArrayList(1); + List columnValues = new ArrayList<>(1); columnValues.add(columnValue); put.setColumnValues(columnValues); @@ -159,7 +159,7 @@ public class DemoClient { new Configuration() { @Override public AppConfigurationEntry[] getAppConfigurationEntry(String name) { - Map options = new HashMap(); + Map options = new HashMap<>(); options.put("useKeyTab", "false"); options.put("storeKey", "false"); options.put("doNotPrompt", "true"); diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index 69d8521c3ec..e74176080be 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -100,7 +100,7 @@ public class MemcachedBlockCache implements BlockCache { // case. String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211"); String[] servers = serverListString.split(","); - List serverAddresses = new ArrayList(servers.length); + List serverAddresses = new ArrayList<>(servers.length); for (String s:servers) { serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s)); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java index 78442ba4c44..be6d6d1809c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java @@ -34,7 +34,7 @@ public class CompatibilitySingletonFactory extends CompatibilityFactory { public static enum SingletonStorage { INSTANCE; private final Object lock = new Object(); - private final Map instances = new HashMap(); + private final Map instances = new HashMap<>(); } private static final Log LOG = LogFactory.getLog(CompatibilitySingletonFactory.class); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java index f94205921d2..168f6c74fa5 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java @@ -55,9 +55,8 @@ public class TestCompatibilitySingletonFactory { @Test public void testGetInstance() throws Exception { - List callables = - new ArrayList(ITERATIONS); - List resultStrings = new ArrayList(ITERATIONS); + List callables = new ArrayList<>(ITERATIONS); + List resultStrings = new ArrayList<>(ITERATIONS); // Create the callables. 
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java index 76bbb099b2e..78893ab6ef1 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java @@ -28,10 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory { private enum SourceStorage { INSTANCE; - HashMap - sources = - new HashMap(); - + HashMap sources = new HashMap<>(); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java index f658a27037b..c304fb918c9 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java @@ -46,7 +46,7 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm return size() > mruCap; } }; - private Map costFunctionDescs = new ConcurrentHashMap(); + private Map costFunctionDescs = new ConcurrentHashMap<>(); /** * Calculates the mru cache capacity from the metrics size @@ -79,7 +79,7 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm synchronized (stochasticCosts) { Map costs = stochasticCosts.get(tableName); if (costs == null) { - costs = new ConcurrentHashMap(); + costs = new ConcurrentHashMap<>(); } costs.put(costFunctionName, cost); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java index 7905561fe9c..565b853e753 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java @@ -45,14 +45,14 @@ public final class Interns { CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) .build(new CacheLoader>() { public ConcurrentHashMap load(String key) { - return new ConcurrentHashMap(); + return new ConcurrentHashMap<>(); } }); private static LoadingCache> tagCache = CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.DAYS) .build(new CacheLoader>() { public ConcurrentHashMap load(MetricsInfo key) { - return new ConcurrentHashMap(); + return new ConcurrentHashMap<>(); } }); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java index a968acacd6a..3e4016d4a80 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java @@ -73,7 +73,7 @@ public class MetricSampleQuantiles { public MetricSampleQuantiles(MetricQuantile[] quantiles) { this.quantiles = Arrays.copyOf(quantiles, quantiles.length); - this.samples = new LinkedList(); + this.samples = new LinkedList<>(); } /** @@ -235,7 +235,7 @@ public 
class MetricSampleQuantiles { synchronized public Map snapshot() throws IOException { // flush the buffer first for best results insertBatch(); - Map values = new HashMap(quantiles.length); + Map values = new HashMap<>(quantiles.length); for (int i = 0; i < quantiles.length; i++) { values.put(quantiles[i], query(quantiles[i].quantile)); } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 4291eb7e462..19a8ad280d4 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -37,9 +37,9 @@ import static org.junit.Assert.*; * A helper class that will allow tests to get into hadoop2's metrics2 values. */ public class MetricsAssertHelperImpl implements MetricsAssertHelper { - private Map tags = new HashMap(); - private Map gauges = new HashMap(); - private Map counters = new HashMap(); + private Map tags = new HashMap<>(); + private Map gauges = new HashMap<>(); + private Map counters = new HashMap<>(); public class MockMetricsBuilder implements MetricsCollector { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index d35ef849307..431ba421e57 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -317,7 +317,7 @@ public class DistributedHBaseCluster extends HBaseCluster { } protected boolean restoreMasters(ClusterStatus initial, ClusterStatus current) { - List deferred = new ArrayList(); + List deferred = new ArrayList<>(); //check whether current master has changed final ServerName initMaster = initial.getMaster(); if (!ServerName.isSameHostnameAndPort(initMaster, current.getMaster())) { @@ -371,8 +371,8 @@ public class DistributedHBaseCluster extends HBaseCluster { } } else { //current master has not changed, match up backup masters - Set toStart = new TreeSet(new ServerNameIgnoreStartCodeComparator()); - Set toKill = new TreeSet(new ServerNameIgnoreStartCodeComparator()); + Set toStart = new TreeSet<>(new ServerNameIgnoreStartCodeComparator()); + Set toKill = new TreeSet<>(new ServerNameIgnoreStartCodeComparator()); toStart.addAll(initial.getBackupMasters()); toKill.addAll(current.getBackupMasters()); @@ -429,8 +429,8 @@ public class DistributedHBaseCluster extends HBaseCluster { } protected boolean restoreRegionServers(ClusterStatus initial, ClusterStatus current) { - Set toStart = new TreeSet(new ServerNameIgnoreStartCodeComparator()); - Set toKill = new TreeSet(new ServerNameIgnoreStartCodeComparator()); + Set toStart = new TreeSet<>(new ServerNameIgnoreStartCodeComparator()); + Set toKill = new TreeSet<>(new ServerNameIgnoreStartCodeComparator()); toStart.addAll(initial.getServers()); toKill.addAll(current.getServers()); @@ -443,7 +443,7 @@ public class DistributedHBaseCluster extends HBaseCluster { toKill.remove(server); } - List deferred = new ArrayList(); + List deferred = new ArrayList<>(); for(ServerName sn:toStart) { try { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java index b6f1aeb647c..07014e5d9b4 100644 --- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java @@ -312,7 +312,7 @@ public class HBaseClusterManager extends Configured implements ClusterManager { LOG.info("Executed remote command, exit code:" + shell.getExitCode() + " , output:" + shell.getOutput()); - return new Pair(shell.getExitCode(), shell.getOutput()); + return new Pair<>(shell.getExitCode(), shell.getOutput()); } private Pair execWithRetries(String hostname, ServiceType service, String... cmd) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java index 5d797221bbb..2d3693ab023 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java @@ -125,17 +125,13 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { protected int numThreads, numRegions; - ConcurrentHashMap namespaceMap = - new ConcurrentHashMap(); + ConcurrentHashMap namespaceMap = new ConcurrentHashMap<>(); - ConcurrentHashMap enabledTables = - new ConcurrentHashMap(); + ConcurrentHashMap enabledTables = new ConcurrentHashMap<>(); - ConcurrentHashMap disabledTables = - new ConcurrentHashMap(); + ConcurrentHashMap disabledTables = new ConcurrentHashMap<>(); - ConcurrentHashMap deletedTables = - new ConcurrentHashMap(); + ConcurrentHashMap deletedTables = new ConcurrentHashMap<>(); @Override public void setUpCluster() throws Exception { @@ -256,7 +252,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { if (namespaceMap.isEmpty()) { return null; } - ArrayList namespaceList = new ArrayList(namespaceMap.keySet()); + ArrayList namespaceList = new ArrayList<>(namespaceMap.keySet()); String randomKey = namespaceList.get(RandomUtils.nextInt(namespaceList.size())); NamespaceDescriptor randomNsd = namespaceMap.get(randomKey); // remove from namespaceMap @@ -396,7 +392,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { if (tableMap.isEmpty()) { return null; } - ArrayList tableList = new ArrayList(tableMap.keySet()); + ArrayList tableList = new ArrayList<>(tableMap.keySet()); TableName randomKey = tableList.get(RandomUtils.nextInt(tableList.size())); HTableDescriptor randomHtd = tableMap.get(randomKey); // remove from tableMap @@ -770,7 +766,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase { Admin admin = connection.getAdmin(); TableName tableName = selected.getTableName(); try (Table table = connection.getTable(tableName)){ - ArrayList regionInfos = new ArrayList(admin.getTableRegions( + ArrayList regionInfos = new ArrayList<>(admin.getTableRegions( selected.getTableName())); int numRegions = regionInfos.size(); // average number of rows to be added per action to each region diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java index d4bcacd524a..7b6635ed4b9 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java @@ -207,7 +207,7 @@ public class IntegrationTestIngest extends IntegrationTestBase { } protected String[] getArgsForLoadTestToolInitTable() { - List args = new ArrayList(); + List args = 
new ArrayList<>(); args.add("-tn"); args.add(getTablename().getNameAsString()); // pass all remaining args from conf with keys . @@ -225,7 +225,7 @@ public class IntegrationTestIngest extends IntegrationTestBase { protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey, long numKeys) { - List args = new ArrayList(11); + List args = new ArrayList<>(11); args.add("-tn"); args.add(getTablename().getNameAsString()); args.add("-families"); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithACL.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithACL.java index 82eef1a1206..d1292790b8b 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithACL.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithACL.java @@ -68,7 +68,7 @@ public class IntegrationTestIngestWithACL extends IntegrationTestIngest { protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey, long numKeys) { String[] args = super.getArgsForLoadTestTool(mode, modeSpecificArg, startKey, numKeys); - List tmp = new ArrayList(Arrays.asList(args)); + List tmp = new ArrayList<>(Arrays.asList(args)); tmp.add(HYPHEN + LoadTestTool.OPT_GENERATOR); StringBuilder sb = new StringBuilder(LoadTestDataGeneratorWithACL.class.getName()); sb.append(COLON); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java index 13a5936a380..cd9e355b207 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java @@ -61,7 +61,7 @@ public class IntegrationTestIngestWithMOB extends IntegrationTestIngest { @Override protected String[] getArgsForLoadTestToolInitTable() { - List args = new ArrayList(); + List args = new ArrayList<>(); args.add("-tn"); args.add(getTablename().getNameAsString()); // pass all remaining args from conf with keys . 
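As an aside, the change applied throughout these hunks is the Java 7 diamond operator: the compiler infers the constructor's type arguments from the declaration on the left-hand side, so repeating them on the right-hand side is redundant. A minimal sketch of the pattern, using hypothetical names rather than code taken from the patch:

    // Before: the element type is spelled out twice.
    List<String> argsBefore = new ArrayList<String>();
    // After: the diamond asks the compiler to infer the same type argument.
    List<String> argsAfter = new ArrayList<>();

Both forms compile to the same thing; behaviour is unchanged and only the source text gets shorter, which is why every hunk in this patch is a one-line (or wrapping-only) edit.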
@@ -133,7 +133,7 @@ public class IntegrationTestIngestWithMOB extends IntegrationTestIngest { protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey, long numKeys) { String[] args = super.getArgsForLoadTestTool(mode, modeSpecificArg, startKey, numKeys); - List tmp = new ArrayList(Arrays.asList(args)); + List tmp = new ArrayList<>(Arrays.asList(args)); // LoadTestDataGeneratorMOB:mobColumnFamily:minMobDataSize:maxMobDataSize tmp.add(HIPHEN + LoadTestTool.OPT_GENERATOR); StringBuilder sb = new StringBuilder(LoadTestDataGeneratorWithMOB.class.getName()); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithTags.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithTags.java index f1b2c6849e5..08bd4e5aa97 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithTags.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithTags.java @@ -45,7 +45,7 @@ public class IntegrationTestIngestWithTags extends IntegrationTestIngest { protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey, long numKeys) { String[] args = super.getArgsForLoadTestTool(mode, modeSpecificArg, startKey, numKeys); - List tmp = new ArrayList(Arrays.asList(args)); + List tmp = new ArrayList<>(Arrays.asList(args)); // LoadTestDataGeneratorWithTags:minNumTags:maxNumTags:minTagLength:maxTagLength tmp.add(HIPHEN + LoadTestTool.OPT_GENERATOR); StringBuilder sb = new StringBuilder(LoadTestDataGeneratorWithTags.class.getName()); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java index 133be1a8ece..b7d8dad4bb2 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java @@ -42,31 +42,31 @@ public class IntegrationTestIngestWithVisibilityLabels extends IntegrationTestIn private static final String[] VISIBILITY_EXPS = { "secret & confidential & !private", "topsecret | confidential", "confidential & private", "public", "topsecret & private", "!public | private", "(secret | topsecret) & private" }; - private static final List> AUTHS = new ArrayList>(); + private static final List> AUTHS = new ArrayList<>(); static { - ArrayList tmp = new ArrayList(2); + ArrayList tmp = new ArrayList<>(2); tmp.add("secret"); tmp.add("confidential"); AUTHS.add(tmp); - tmp = new ArrayList(1); + tmp = new ArrayList<>(1); tmp.add("topsecret"); AUTHS.add(tmp); - tmp = new ArrayList(2); + tmp = new ArrayList<>(2); tmp.add("confidential"); tmp.add("private"); AUTHS.add(tmp); - tmp = new ArrayList(1); + tmp = new ArrayList<>(1); tmp.add("public"); AUTHS.add(tmp); - tmp = new ArrayList(2); + tmp = new ArrayList<>(2); tmp.add("topsecret"); tmp.add("private"); AUTHS.add(tmp); - tmp = new ArrayList(1); + tmp = new ArrayList<>(1); tmp.add("confidential"); AUTHS.add(tmp); - tmp = new ArrayList(2); + tmp = new ArrayList<>(2); tmp.add("topsecret"); tmp.add("private"); AUTHS.add(tmp); @@ -88,7 +88,7 @@ public class IntegrationTestIngestWithVisibilityLabels extends IntegrationTestIn protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey, long numKeys) { String[] args = super.getArgsForLoadTestTool(mode, modeSpecificArg, startKey, numKeys); - List tmp = new 
ArrayList(Arrays.asList(args)); + List tmp = new ArrayList<>(Arrays.asList(args)); tmp.add(HIPHEN + LoadTestTool.OPT_GENERATOR); StringBuilder sb = new StringBuilder(LoadTestDataGeneratorWithVisibilityLabels.class.getName()); sb.append(COLON); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java index 548ff53e961..6efe9d83a03 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java @@ -92,8 +92,7 @@ public class IntegrationTestLazyCfLoading { public static final byte[] VALUE_COLUMN = Bytes.toBytes("val"); public static final long ACCEPTED_VALUE = 1L; - private static final Map columnMap = new TreeMap( - Bytes.BYTES_COMPARATOR); + private static final Map columnMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); private final AtomicLong expectedNumberOfKeys = new AtomicLong(0); private final AtomicLong totalNumberOfKeys = new AtomicLong(0); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java index e609f0b4940..c3c5df36588 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java @@ -266,8 +266,8 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase { format("--nomapred --table=%s --latency --sampleRate=0.1 randomRead 4", tableName); String replicaReadOpts = format("%s %s", replicas, readOpts); - ArrayList resultsWithoutReplicas = new ArrayList(maxIters); - ArrayList resultsWithReplicas = new ArrayList(maxIters); + ArrayList resultsWithoutReplicas = new ArrayList<>(maxIters); + ArrayList resultsWithReplicas = new ArrayList<>(maxIters); // create/populate the table, replicas disabled LOG.debug("Populating table."); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java index 98d53e96c90..b6cfdcda539 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java @@ -126,7 +126,7 @@ public class IntegrationTestRegionReplicaReplication extends IntegrationTestInge protected BlockingQueue createWriteKeysQueue(Configuration conf) { this.delayMs = conf.getLong(String.format("%s.%s", IntegrationTestRegionReplicaReplication.class.getSimpleName(), OPT_READ_DELAY_MS), 5000); - return new ConstantDelayQueue(TimeUnit.MILLISECONDS, delayMs); + return new ConstantDelayQueue<>(TimeUnit.MILLISECONDS, delayMs); } } @@ -145,7 +145,7 @@ public class IntegrationTestRegionReplicaReplication extends IntegrationTestInge protected BlockingQueue createWriteKeysQueue(Configuration conf) { this.delayMs = conf.getLong(String.format("%s.%s", IntegrationTestRegionReplicaReplication.class.getSimpleName(), OPT_READ_DELAY_MS), 5000); - return new ConstantDelayQueue(TimeUnit.MILLISECONDS, delayMs); + return new ConstantDelayQueue<>(TimeUnit.MILLISECONDS, delayMs); } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java 
index 04a3b05fe22..03ba4606d95 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java @@ -336,7 +336,7 @@ public class RESTApiClusterManager extends Configured implements ClusterManager // ClusterManager methods take a "ServiceType" object (e.g. "HBASE_MASTER," "HADOOP_NAMENODE"). // These "service types," which cluster managers call "roles" or "components," need to be mapped // to their corresponding service (e.g. "HBase," "HDFS") in order to be controlled. - private static Map roleServiceType = new HashMap(); + private static Map roleServiceType = new HashMap<>(); static { roleServiceType.put(ServiceType.HADOOP_NAMENODE, Service.HDFS); roleServiceType.put(ServiceType.HADOOP_DATANODE, Service.HDFS); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java index d1a32b1f09a..4c7be8c537d 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java @@ -123,7 +123,7 @@ public class Action { if (count == 1) { return new ServerName [] {}; } - ArrayList tmp = new ArrayList(count); + ArrayList tmp = new ArrayList<>(count); tmp.addAll(regionServers); tmp.remove(master); return tmp.toArray(new ServerName[count-1]); @@ -192,11 +192,11 @@ public class Action { protected void unbalanceRegions(ClusterStatus clusterStatus, List fromServers, List toServers, double fractionOfRegions) throws Exception { - List victimRegions = new LinkedList(); + List victimRegions = new LinkedList<>(); for (ServerName server : fromServers) { ServerLoad serverLoad = clusterStatus.getLoad(server); // Ugh. 
- List regions = new LinkedList(serverLoad.getRegionsLoad().keySet()); + List regions = new LinkedList<>(serverLoad.getRegionsLoad().keySet()); int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size()); LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName()); for (int i = 0; i < victimRegionCount; ++i) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java index ce660006b27..75414aea98b 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/BatchRestartRsAction.java @@ -43,7 +43,7 @@ public class BatchRestartRsAction extends RestartActionBaseAction { List selectedServers = PolicyBasedChaosMonkey.selectRandomItems(getCurrentServers(), ratio); - Set killedServers = new HashSet(); + Set killedServers = new HashSet<>(); for (ServerName server : selectedServers) { // Don't keep killing servers if we're diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java index 7299e79ed16..f5349dc1b6c 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRandomDataNodeAction.java @@ -49,7 +49,7 @@ public class RestartRandomDataNodeAction extends RestartActionBaseAction { DistributedFileSystem fs = (DistributedFileSystem) FSUtils.getRootDir(getConf()) .getFileSystem(getConf()); DFSClient dfsClient = fs.getClient(); - List hosts = new LinkedList(); + List hosts = new LinkedList<>(); for (DatanodeInfo dataNode: dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) { hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1)); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java index e79ff5b5739..ba251987ef8 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsAction.java @@ -59,8 +59,8 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction { (int)(ratio * 100))); List selectedServers = selectServers(); - Queue serversToBeKilled = new LinkedList(selectedServers); - Queue deadServers = new LinkedList(); + Queue serversToBeKilled = new LinkedList<>(selectedServers); + Queue deadServers = new LinkedList<>(); // loop while there are servers to be killed or dead servers to be restarted while ((!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) && !context.isStopping()) { @@ -123,7 +123,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction { @Override protected ServerName[] getCurrentServers() throws IOException { final int count = 4; - List serverNames = new ArrayList(count); + List serverNames = new ArrayList<>(count); for (int i = 0; i < 4; i++) { serverNames.add(ServerName.valueOf(i + ".example.org", i, i)); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java index 1ac14589f81..a40c8b1dbf7 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceKillAndRebalanceAction.java @@ -52,13 +52,13 @@ public class UnbalanceKillAndRebalanceAction extends Action { @Override public void perform() throws Exception { ClusterStatus status = this.cluster.getClusterStatus(); - List victimServers = new LinkedList(status.getServers()); - Set killedServers = new HashSet(); + List victimServers = new LinkedList<>(status.getServers()); + Set killedServers = new HashSet<>(); int liveCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size()); int deadCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size()); Assert.assertTrue((liveCount + deadCount) < victimServers.size()); - List targetServers = new ArrayList(liveCount); + List targetServers = new ArrayList<>(liveCount); for (int i = 0; i < liveCount + deadCount; ++i) { int victimIx = RandomUtils.nextInt(victimServers.size()); targetServers.add(victimServers.remove(victimIx)); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java index 2779bd1ca1a..bdffdb18822 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/UnbalanceRegionsAction.java @@ -48,9 +48,9 @@ public class UnbalanceRegionsAction extends Action { public void perform() throws Exception { LOG.info("Unbalancing regions"); ClusterStatus status = this.cluster.getClusterStatus(); - List victimServers = new LinkedList(status.getServers()); + List victimServers = new LinkedList<>(status.getServers()); int targetServerCount = (int)Math.ceil(fractionOfServers * victimServers.size()); - List targetServers = new ArrayList(targetServerCount); + List targetServers = new ArrayList<>(targetServerCount); for (int i = 0; i < targetServerCount; ++i) { int victimIx = RandomUtils.nextInt(victimServers.size()); targetServers.add(victimServers.remove(victimIx)); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java index 57f7c836495..951f8f8777f 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java @@ -92,7 +92,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey { public static List selectRandomItems(T[] items, float ratio) { int remaining = (int)Math.ceil(items.length * ratio); - List selectedItems = new ArrayList(remaining); + List selectedItems = new ArrayList<>(remaining); for (int i=0; i 0; i++) { if (RandomUtils.nextFloat() < ((float)remaining/(items.length-i))) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/DoActionsOncePolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/DoActionsOncePolicy.java index e03816a1038..35f06eb4c09 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/DoActionsOncePolicy.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/DoActionsOncePolicy.java 
@@ -31,7 +31,7 @@ public class DoActionsOncePolicy extends PeriodicPolicy { public DoActionsOncePolicy(long periodMs, List actions) { super(periodMs); - this.actions = new ArrayList(actions); + this.actions = new ArrayList<>(actions); } public DoActionsOncePolicy(long periodMs, Action... actions) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java index 89124670971..8b76e49ba50 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java @@ -46,9 +46,9 @@ public class PeriodicRandomActionPolicy extends PeriodicPolicy { public PeriodicRandomActionPolicy(long periodMs, Action... actions) { super(periodMs); - this.actions = new ArrayList>(actions.length); + this.actions = new ArrayList<>(actions.length); for (Action action : actions) { - this.actions.add(new Pair(action, 1)); + this.actions.add(new Pair<>(action, 1)); } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java index fd062d190da..e39d0fe03f8 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java @@ -353,7 +353,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { @Override public List getSplits(JobContext context) throws IOException, InterruptedException { int numSplits = context.getConfiguration().getInt(NUM_MAPS_KEY, NUM_MAPS); - ArrayList ret = new ArrayList(numSplits); + ArrayList ret = new ArrayList<>(numSplits); for (int i = 0; i < numSplits; ++i) { ret.add(new EmptySplit()); } @@ -376,7 +376,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { chainId = chainId - (chainId % numMapTasks) + taskId; // ensure that chainId is unique per task and across iterations LongWritable[] keys = new LongWritable[] {new LongWritable(chainId)}; - return new FixedRecordReader(keys, keys); + return new FixedRecordReader<>(keys, keys); } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java index 42b6ae742a2..9d04bf91b0c 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java @@ -194,7 +194,7 @@ public class IntegrationTestImportTsv extends Configured implements Tool { util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); args.put(ImportTsv.COLUMNS_CONF_KEY, format("HBASE_ROW_KEY,HBASE_TS_KEY,%s:c1,%s:c2", cf, cf)); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index f87cc86b4a9..bd14c31aaf4 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -297,9 +297,9 @@ public class IntegrationTestMTTR { 
LOG.info("Starting " + testName + " with " + maxIters + " iterations."); // Array to keep track of times. - ArrayList resultPuts = new ArrayList(maxIters); - ArrayList resultScan = new ArrayList(maxIters); - ArrayList resultAdmin = new ArrayList(maxIters); + ArrayList resultPuts = new ArrayList<>(maxIters); + ArrayList resultScan = new ArrayList<>(maxIters); + ArrayList resultAdmin = new ArrayList<>(maxIters); long start = System.nanoTime(); try { @@ -357,7 +357,7 @@ public class IntegrationTestMTTR { */ private static class TimingResult { DescriptiveStatistics stats = new DescriptiveStatistics(); - ArrayList traces = new ArrayList(10); + ArrayList traces = new ArrayList<>(10); /** * Add a result to this aggregate result. diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index dff1828772a..1b23de8799a 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -350,7 +350,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { public List getSplits(JobContext job) throws IOException, InterruptedException { int numMappers = job.getConfiguration().getInt(GENERATOR_NUM_MAPPERS_KEY, 1); - ArrayList splits = new ArrayList(numMappers); + ArrayList splits = new ArrayList<>(numMappers); for (int i = 0; i < numMappers; i++) { splits.add(new GeneratorInputSplit()); @@ -956,7 +956,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { throws IOException, InterruptedException { Path keysInputDir = new Path(conf.get(SEARCHER_INPUTDIR_KEY)); FileSystem fs = FileSystem.get(conf); - SortedSet result = new TreeSet(Bytes.BYTES_COMPARATOR); + SortedSet result = new TreeSet<>(Bytes.BYTES_COMPARATOR); if (!fs.exists(keysInputDir)) { throw new FileNotFoundException(keysInputDir.toString()); } @@ -977,7 +977,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { private static SortedSet readFileToSearch(final Configuration conf, final FileSystem fs, final LocatedFileStatus keyFileStatus) throws IOException, InterruptedException { - SortedSet result = new TreeSet(Bytes.BYTES_COMPARATOR); + SortedSet result = new TreeSet<>(Bytes.BYTES_COMPARATOR); // Return entries that are flagged Counts.UNDEFINED in the value. Return the row. This is // what is missing. 
TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID()); @@ -1064,7 +1064,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { */ public static class VerifyReducer extends Reducer { - private ArrayList refs = new ArrayList(); + private ArrayList refs = new ArrayList<>(); private final BytesWritable UNREF = new BytesWritable(addPrefixFlag( Counts.UNREFERENCED.ordinal(), new byte[] {})); private final BytesWritable LOSTFAM = new BytesWritable(addPrefixFlag( diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java index dec565f340d..9eacc5a7fd8 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java @@ -456,7 +456,7 @@ public void cleanUpCluster() throws Exception { throws IOException, InterruptedException { Path keysInputDir = new Path(conf.get(SEARCHER_INPUTDIR_KEY)); FileSystem fs = FileSystem.get(conf); - SortedSet result = new TreeSet(Bytes.BYTES_COMPARATOR); + SortedSet result = new TreeSet<>(Bytes.BYTES_COMPARATOR); if (!fs.exists(keysInputDir)) { throw new FileNotFoundException(keysInputDir.toString()); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java index 141b24db43f..bf534f3f9a5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java @@ -234,7 +234,7 @@ public class IntegrationTestReplication extends IntegrationTestBigLinkedList { // set the test table to be the table to replicate HashMap> toReplicate = new HashMap<>(); - toReplicate.put(tableName, new ArrayList(0)); + toReplicate.put(tableName, new ArrayList<>(0)); replicationAdmin.addPeer("TestPeer", peerConfig, toReplicate); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java index b7463bd57da..327d8792d60 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java @@ -233,7 +233,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool { } private LinkedBlockingQueue insertData() throws IOException, InterruptedException { - LinkedBlockingQueue rowKeys = new LinkedBlockingQueue(25000); + LinkedBlockingQueue rowKeys = new LinkedBlockingQueue<>(25000); BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName); byte[] value = new byte[300]; for (int x = 0; x < 5000; x++) { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java index e6df88a9390..5aa5d88300a 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java @@ -42,8 +42,7 @@ public class ArraySearcherPool { */ private static final Integer MAX_POOL_SIZE = 
1000; - protected Queue pool - = new LinkedBlockingQueue(MAX_POOL_SIZE); + protected Queue pool = new LinkedBlockingQueue<>(MAX_POOL_SIZE); public PrefixTreeArraySearcher checkOut(ByteBuff buffer, boolean includesMvccVersion) { PrefixTreeArraySearcher searcher = pool.poll();//will return null if pool is empty diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java index 3ca4236b88d..255c8a37e6d 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeCell.java @@ -87,7 +87,7 @@ public class PrefixTreeCell extends ByteBufferCell implements SettableSequenceId protected int tagsOffset; protected int tagsLength; // Pair to set the value ByteBuffer and its offset - protected ObjectIntPair pair = new ObjectIntPair(); + protected ObjectIntPair pair = new ObjectIntPair<>(); /********************** Cell methods ******************/ diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/EncoderPoolImpl.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/EncoderPoolImpl.java index 8a5ffbad062..a8f0082a4ef 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/EncoderPoolImpl.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/EncoderPoolImpl.java @@ -26,8 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public class EncoderPoolImpl implements EncoderPool { - private BlockingQueue unusedEncoders = - new LinkedBlockingQueue(); + private BlockingQueue unusedEncoders = new LinkedBlockingQueue<>(); @Override public PrefixTreeEncoder checkOut(OutputStream outputStream, boolean includeMvccVersion) { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/other/LongEncoder.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/other/LongEncoder.java index 3291d7296da..3597fbef34e 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/other/LongEncoder.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/other/LongEncoder.java @@ -60,7 +60,7 @@ public class LongEncoder { /****************** construct ****************************/ public LongEncoder() { - this.uniqueValues = new HashSet(); + this.uniqueValues = new HashSet<>(); } public void reset() { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java index f44017b7b46..e2824b0f810 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java @@ -161,7 +161,7 @@ public class Tokenizer{ } public List getArrays() { - List nodes = new ArrayList(); + List nodes = new ArrayList<>(); root.appendNodesToExternalList(nodes, true, true); List byteArrays = Lists.newArrayListWithCapacity(CollectionUtils.nullSafeSize(nodes)); for (int i = 0; i < nodes.size(); ++i) { diff --git 
a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeHashSet.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeHashSet.java index 9ce61631265..dbaa50837e0 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeHashSet.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeHashSet.java @@ -39,7 +39,7 @@ public class ByteRangeHashSet extends ByteRangeSet { /************************ constructors *****************************/ public ByteRangeHashSet() { - this.uniqueIndexByUniqueRange = new HashMap(); + this.uniqueIndexByUniqueRange = new HashMap<>(); } public ByteRangeHashSet(List rawByteArrays) { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeTreeSet.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeTreeSet.java index b5c0b1a9688..4ee7b286230 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeTreeSet.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/byterange/impl/ByteRangeTreeSet.java @@ -36,7 +36,7 @@ public class ByteRangeTreeSet extends ByteRangeSet { /************************ constructors *****************************/ public ByteRangeTreeSet() { - this.uniqueIndexByUniqueRange = new TreeMap(); + this.uniqueIndexByUniqueRange = new TreeMap<>(); } public ByteRangeTreeSet(List rawByteArrays) { diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java index 39140a379b1..1f9b45917a4 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java @@ -43,7 +43,7 @@ public class TestRowDataExerciseFInts extends BaseTestRowData{ static List rows; static{ - List rowStrings = new ArrayList(16); + List rowStrings = new ArrayList<>(16); rowStrings.add("com.edsBlog/directoryAa/pageAaa"); rowStrings.add("com.edsBlog/directoryAa/pageBbb"); rowStrings.add("com.edsBlog/directoryAa/pageCcc"); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java index 2d3901f1437..a7edfe79149 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java @@ -46,7 +46,7 @@ public class TestRowDataTrivialWithTags extends BaseTestRowData{ static List d = Lists.newArrayList(); static { - List tagList = new ArrayList(2); + List tagList = new ArrayList<>(2); Tag t = new ArrayBackedTag((byte) 1, "visisbility"); tagList.add(t); t = new ArrayBackedTag((byte) 2, "ACL"); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java index a71daaaf887..02766179c35 100644 --- 
a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java @@ -41,7 +41,7 @@ public class TestRowDataUrls extends BaseTestRowData{ static List rows; static{ - List rowStrings = new ArrayList(16); + List rowStrings = new ArrayList<>(16); rowStrings.add("com.edsBlog/directoryAa/pageAaa"); rowStrings.add("com.edsBlog/directoryAa/pageBbb"); rowStrings.add("com.edsBlog/directoryAa/pageCcc"); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java index bccff6da101..d4fbb4d5b87 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java @@ -27,7 +27,7 @@ public class TestTimestampDataBasic implements TestTimestampData { @Override public List getInputs() { - List d = new ArrayList(5); + List d = new ArrayList<>(5); d.add(5L); d.add(3L); d.add(0L); @@ -43,7 +43,7 @@ public class TestTimestampDataBasic implements TestTimestampData { @Override public List getOutputs() { - List d = new ArrayList(4); + List d = new ArrayList<>(4); d.add(0L); d.add(1L); d.add(3L); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java index 2a5dcaedf39..d0bc8378927 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java @@ -29,7 +29,7 @@ public class TestTimestampDataNumbers implements TestTimestampData { @Override public List getInputs() { - List d = new ArrayList(5); + List d = new ArrayList<>(5); d.add(5L << shift); d.add(3L << shift); d.add(7L << shift); @@ -45,7 +45,7 @@ public class TestTimestampDataNumbers implements TestTimestampData { @Override public List getOutputs() { - List d = new ArrayList(4); + List d = new ArrayList<>(4); d.add(1L << shift); d.add(3L << shift); d.add(5L << shift); diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java index 2186528d795..3320d66baba 100644 --- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java +++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java @@ -29,7 +29,7 @@ public class TestTimestampDataRepeats implements TestTimestampData { @Override public List getInputs() { - List d = new ArrayList(5); + List d = new ArrayList<>(5); d.add(t); d.add(t); d.add(t); @@ -45,7 +45,7 @@ public class TestTimestampDataRepeats implements TestTimestampData { @Override public List getOutputs() { - List d = new ArrayList(); + List d = new ArrayList<>(); return d; } diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index b38b96c6520..0856aa22eb5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -209,33 +209,28 @@ public class ProcedureExecutor { * Once a Root-Procedure completes (success or failure), the result will be added to this map. * The user of ProcedureExecutor should call getResult(procId) to get the result. */ - private final ConcurrentHashMap completed = - new ConcurrentHashMap(); + private final ConcurrentHashMap completed = new ConcurrentHashMap<>(); /** * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. * The RootProcedureState contains the execution stack of the Root-Procedure, * It is added to the map by submitProcedure() and removed on procedure completion. */ - private final ConcurrentHashMap rollbackStack = - new ConcurrentHashMap(); + private final ConcurrentHashMap rollbackStack = new ConcurrentHashMap<>(); /** * Helper map to lookup the live procedures by ID. * This map contains every procedure. root-procedures and subprocedures. */ - private final ConcurrentHashMap procedures = - new ConcurrentHashMap(); + private final ConcurrentHashMap procedures = new ConcurrentHashMap<>(); /** * Helper map to lookup whether the procedure already issued from the same client. * This map contains every root procedure. */ - private final ConcurrentHashMap nonceKeysToProcIdsMap = - new ConcurrentHashMap(); + private final ConcurrentHashMap nonceKeysToProcIdsMap = new ConcurrentHashMap<>(); - private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList(); + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); private Configuration conf; private ThreadGroup threadGroup; @@ -399,7 +394,7 @@ public class ProcedureExecutor { break; case WAITING_TIMEOUT: if (waitingSet == null) { - waitingSet = new HashSet(); + waitingSet = new HashSet<>(); } waitingSet.add(proc); break; @@ -498,7 +493,7 @@ public class ProcedureExecutor { // Create the workers workerId.set(0); - workerThreads = new CopyOnWriteArrayList(); + workerThreads = new CopyOnWriteArrayList<>(); for (int i = 0; i < corePoolSize; ++i) { workerThreads.add(new WorkerThread(threadGroup)); } @@ -979,8 +974,7 @@ public class ProcedureExecutor { * @return the procedures in a list */ public List listProcedures() { - final List procedureLists = - new ArrayList(procedures.size() + completed.size()); + final List procedureLists = new ArrayList<>(procedures.size() + completed.size()); for (Map.Entry p: procedures.entrySet()) { procedureLists.add(ProcedureUtil.convertToProcedureInfo(p.getValue())); } @@ -1614,7 +1608,7 @@ public class ProcedureExecutor { // Timeout Thread // ========================================================================== private final class TimeoutExecutorThread extends StoppableThread { - private final DelayQueue queue = new DelayQueue(); + private final DelayQueue queue = new DelayQueue<>(); public TimeoutExecutorThread(final ThreadGroup group) { super(group, "ProcedureTimeoutExecutor"); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java index 2f118b7ce6a..4f9b1361992 100644 --- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -147,7 +147,7 @@ class RootProcedureState { state = State.FAILED; } if (subprocStack == null) { - subprocStack = new ArrayList(); + subprocStack = new ArrayList<>(); } proc.addStackIndex(subprocStack.size()); subprocStack.add(proc); @@ -156,7 +156,7 @@ class RootProcedureState { protected synchronized void addSubProcedure(final Procedure proc) { if (!proc.hasParent()) return; if (subprocs == null) { - subprocs = new HashSet(); + subprocs = new HashSet<>(); } subprocs.add(proc); } @@ -173,7 +173,7 @@ class RootProcedureState { int[] stackIndexes = proc.getStackIndexes(); if (stackIndexes != null) { if (subprocStack == null) { - subprocStack = new ArrayList(); + subprocStack = new ArrayList<>(); } int diff = (1 + stackIndexes[stackIndexes.length - 1]) - subprocStack.size(); if (diff > 0) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index 3f9a7b71dfa..5c3a4c726aa 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -130,7 +130,7 @@ public abstract class StateMachineProcedure */ protected void addChildProcedure(Procedure... subProcedure) { if (subProcList == null) { - subProcList = new ArrayList(subProcedure.length); + subProcList = new ArrayList<>(subProcedure.length); } for (int i = 0; i < subProcedure.length; ++i) { Procedure proc = subProcedure[i]; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java index 0e0e46fb824..63eff370916 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java @@ -25,8 +25,7 @@ import java.util.concurrent.atomic.AtomicBoolean; * Base class for {@link ProcedureStore}s. 
*/ public abstract class ProcedureStoreBase implements ProcedureStore { - private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList(); + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); private final AtomicBoolean running = new AtomicBoolean(false); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java index ec596073ac0..5ad96e1a5e1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java @@ -155,7 +155,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool { options.addOption("h", "help", false, "Output help message"); options.addOption("f", "file", true, "File to print"); - final List files = new ArrayList(); + final List files = new ArrayList<>(); try { CommandLine cmd = new PosixParser().parse(options, args); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index 1841aa4d668..4712c306d06 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -125,7 +125,7 @@ public class WALProcedureStore extends ProcedureStoreBase { private final FileSystem fs; private final Path walDir; - private final AtomicReference syncException = new AtomicReference(); + private final AtomicReference syncException = new AtomicReference<>(); private final AtomicBoolean loading = new AtomicBoolean(true); private final AtomicBoolean inSync = new AtomicBoolean(false); private final AtomicLong totalSynced = new AtomicLong(0); @@ -304,7 +304,7 @@ public class WALProcedureStore extends ProcedureStoreBase { public ArrayList getActiveLogs() { lock.lock(); try { - return new ArrayList(logs); + return new ArrayList<>(logs); } finally { lock.unlock(); } @@ -395,7 +395,7 @@ public class WALProcedureStore extends ProcedureStoreBase { @Override public void markCorruptedWAL(ProcedureWALFile log, IOException e) { if (corruptedLogs == null) { - corruptedLogs = new HashSet(); + corruptedLogs = new HashSet<>(); } corruptedLogs.add(log); // TODO: sideline corrupted log @@ -790,7 +790,7 @@ public class WALProcedureStore extends ProcedureStoreBase { public ArrayList getSyncMetrics() { lock.lock(); try { - return new ArrayList(syncMetricsBuffer); + return new ArrayList<>(syncMetricsBuffer); } finally { lock.unlock(); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index c1b4e9b537b..226666f4ec0 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -183,7 +183,7 @@ public class ProcedureTestingUtility { public static long submitAndWait(Configuration conf, TEnv env, Procedure proc) throws IOException { NoopProcedureStore procStore = new NoopProcedureStore(); - ProcedureExecutor procExecutor = new 
ProcedureExecutor(conf, env, procStore); + ProcedureExecutor procExecutor = new ProcedureExecutor<>(conf, env, procStore); procStore.start(1); procExecutor.start(1, false); try { @@ -446,9 +446,9 @@ public class ProcedureTestingUtility { } public static class LoadCounter implements ProcedureStore.ProcedureLoader { - private final ArrayList corrupted = new ArrayList(); - private final ArrayList completed = new ArrayList(); - private final ArrayList runnable = new ArrayList(); + private final ArrayList corrupted = new ArrayList<>(); + private final ArrayList completed = new ArrayList<>(); + private final ArrayList runnable = new ArrayList<>(); private Set procIds; private long maxProcId = 0; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java index da6d96093f6..38adbf5cc46 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -129,7 +129,7 @@ public class TestProcedureExecution { @Test(timeout=30000) public void testBadSubprocList() { - List state = new ArrayList(); + List state = new ArrayList<>(); Procedure subProc2 = new TestSequentialProcedure("subProc2", state); Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2, NULL_PROC); Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); @@ -151,7 +151,7 @@ public class TestProcedureExecution { @Test(timeout=30000) public void testSingleSequentialProc() { - List state = new ArrayList(); + List state = new ArrayList<>(); Procedure subProc2 = new TestSequentialProcedure("subProc2", state); Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2); Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); @@ -166,7 +166,7 @@ public class TestProcedureExecution { @Test(timeout=30000) public void testSingleSequentialProcRollback() { - List state = new ArrayList(); + List state = new ArrayList<>(); Procedure subProc2 = new TestSequentialProcedure("subProc2", state, new TestProcedureException("fail test")); Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2); @@ -295,7 +295,7 @@ public class TestProcedureExecution { @Test(timeout=30000) public void testAbortTimeout() { final int PROC_TIMEOUT_MSEC = 2500; - List state = new ArrayList(); + List state = new ArrayList<>(); Procedure proc = new TestWaitingProcedure("wproc", state, false); proc.setTimeout(PROC_TIMEOUT_MSEC); long startTime = EnvironmentEdgeManager.currentTime(); @@ -313,7 +313,7 @@ public class TestProcedureExecution { @Test(timeout=30000) public void testAbortTimeoutWithChildren() { - List state = new ArrayList(); + List state = new ArrayList<>(); Procedure proc = new TestWaitingProcedure("wproc", state, true); proc.setTimeout(2500); long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java index f838c25d26b..bd614e38c96 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -154,7 +154,7 @@ public class 
TestProcedureReplayOrder { } private static class TestProcedureEnv { - private ArrayList execList = new ArrayList(); + private ArrayList execList = new ArrayList<>(); private AtomicLong execTimestamp = new AtomicLong(0); public long getExecId() { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java index b8cd8ffc8f3..42176937e13 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java @@ -79,8 +79,7 @@ public class TestProcedureSchedulerConcurrency { final AtomicInteger waitCount = new AtomicInteger(0); final AtomicInteger wakeCount = new AtomicInteger(0); - final ConcurrentSkipListSet waitQueue = - new ConcurrentSkipListSet(); + final ConcurrentSkipListSet waitQueue = new ConcurrentSkipListSet<>(); threads[0] = new Thread() { @Override public void run() { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java index ba89768b733..0146bc7ea39 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java @@ -161,7 +161,7 @@ public class TestProcedureSuspended { } public static class TestLockProcedure extends Procedure { - private final ArrayList timestamps = new ArrayList(); + private final ArrayList timestamps = new ArrayList<>(); private final String key; private boolean triggerRollback = false; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java index 165179db9ee..b1d06693942 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java @@ -204,7 +204,7 @@ public class TestYieldProcedures { public boolean isRollback() { return rollback; } } - private final ArrayList executionInfo = new ArrayList(); + private final ArrayList executionInfo = new ArrayList<>(); private final AtomicBoolean aborted = new AtomicBoolean(false); private final boolean throwInterruptOnceOnEachStep; private final boolean abortOnFinalStep; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java index 1e2db4d599d..a2cd70fdb98 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java @@ -36,13 +36,13 @@ public class TestDelayedUtil { Object o1 = new Object(); Object o2 = new Object(); ZeroDelayContainer lnull = new ZeroDelayContainer(null); - ZeroDelayContainer l10a = new ZeroDelayContainer(10L); + ZeroDelayContainer l10a = new ZeroDelayContainer<>(10L); ZeroDelayContainer l10b = new ZeroDelayContainer(10L); ZeroDelayContainer l15 = new ZeroDelayContainer(15L); - ZeroDelayContainer onull = new ZeroDelayContainer(null); - ZeroDelayContainer o1ca = new 
ZeroDelayContainer(o1); - ZeroDelayContainer o1cb = new ZeroDelayContainer(o1); - ZeroDelayContainer o2c = new ZeroDelayContainer(o2); + ZeroDelayContainer onull = new ZeroDelayContainer<>(null); + ZeroDelayContainer o1ca = new ZeroDelayContainer<>(o1); + ZeroDelayContainer o1cb = new ZeroDelayContainer<>(o1); + ZeroDelayContainer o2c = new ZeroDelayContainer<>(o2); ZeroDelayContainer[] items = new ZeroDelayContainer[] { lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java index 0ccd9f9a226..28b390997fb 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -108,7 +108,7 @@ public final class ForeignExceptionUtil { // if there is no stack trace, ignore it and just return the message if (trace == null) return null; // build the stack trace for the message - List pbTrace = new ArrayList(trace.length); + List pbTrace = new ArrayList<>(trace.length); for (StackTraceElement elem : trace) { StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder(); stackBuilder.setDeclaringClass(elem.getClassName()); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 4ab194c1237..ba646c21d9a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -206,8 +206,7 @@ public class RESTServer implements Constants { } @SuppressWarnings("unchecked") - List remainingArgs = commandLine != null ? - commandLine.getArgList() : new ArrayList(); + List remainingArgs = commandLine != null ? commandLine.getArgList() : new ArrayList<>(); if (remainingArgs.size() != 1) { printUsageAndExit(options, 1); } @@ -256,7 +255,7 @@ public class RESTServer implements Constants { int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1); int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); QueuedThreadPool threadPool = queueSize > 0 ? 
- new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue(queueSize)) : + new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : new QueuedThreadPool(maxThreads, minThreads, idleTimeout); Server server = new Server(threadPool); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index de84625774a..7be419089df 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -185,7 +185,7 @@ public class RowResource extends ResourceBase { Table table = null; try { List rows = model.getRows(); - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (RowModel row: rows) { byte[] key = row.getKey(); if (key == null) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index cc51c85965f..5d25c540484 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -44,9 +44,8 @@ public class RowSpec { private byte[] row = HConstants.EMPTY_START_ROW; private byte[] endRow = null; - private TreeSet columns = - new TreeSet(Bytes.BYTES_COMPARATOR); - private List labels = new ArrayList(); + private TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); + private List labels = new ArrayList<>(); private long startTime = DEFAULT_START_TIMESTAMP; private long endTime = DEFAULT_END_TIMESTAMP; private int maxVersions = 1; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 204f688d767..c9cf49a5e77 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -77,7 +77,7 @@ public class Client { private void initialize(Cluster cluster, boolean sslEnabled) { this.cluster = cluster; this.sslEnabled = sslEnabled; - extraHeaders = new ConcurrentHashMap(); + extraHeaders = new ConcurrentHashMap<>(); String clspath = System.getProperty("java.class.path"); LOG.debug("classpath " + clspath); this.httpClient = new DefaultHttpClient(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 51a75d7dd36..e762c31d7f6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -173,9 +173,9 @@ public class RemoteHTable implements Table { } protected Result[] buildResultFromModel(final CellSetModel model) { - List results = new ArrayList(); + List results = new ArrayList<>(); for (RowModel row: model.getRows()) { - List kvs = new ArrayList(row.getCells().size()); + List kvs = new ArrayList<>(row.getCells().size()); for (CellModel cell: row.getCells()) { byte[][] split = KeyValue.parseColumn(cell.getColumn()); byte[] column = split[0]; @@ -425,13 +425,12 @@ public class RemoteHTable implements Table { // ignores the row specification in the URI // separate puts by row - TreeMap> map = - new TreeMap>(Bytes.BYTES_COMPARATOR); + TreeMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Put put: puts) { 
byte[] row = put.getRow(); List cells = map.get(row); if (cells == null) { - cells = new ArrayList(); + cells = new ArrayList<>(); map.put(row, cells); } for (List l: put.getFamilyCellMap().values()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java index 094ae0b3d58..626e61ffc92 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class GzipFilter implements Filter { - private Set mimeTypes = new HashSet(); + private Set mimeTypes = new HashSet<>(); @Override public void init(FilterConfig filterConfig) throws ServletException { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java index dbb1447ca4f..7224383ca34 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -93,7 +93,7 @@ public class RestCsrfPreventionFilter implements Filter { void parseBrowserUserAgents(String userAgents) { String[] agentsArray = userAgents.split(","); - browserUserAgents = new HashSet(); + browserUserAgents = new HashSet<>(); for (String patternString : agentsArray) { browserUserAgents.add(Pattern.compile(patternString)); } @@ -101,7 +101,7 @@ public class RestCsrfPreventionFilter implements Filter { void parseMethodsToIgnore(String mti) { String[] methods = mti.split(","); - methodsToIgnore = new HashSet(); + methodsToIgnore = new HashSet<>(); for (int i = 0; i < methods.length; i++) { methodsToIgnore.add(methods[i]); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index 8337ffc3a14..a754fe4e4b0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -84,7 +84,7 @@ public class CellSetModel implements Serializable, ProtobufMessageHandler { * Constructor */ public CellSetModel() { - this.rows = new ArrayList(); + this.rows = new ArrayList<>(); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java index 8562cdef8fd..1b855fd3b93 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -57,7 +57,7 @@ public class ColumnSchemaModel implements Serializable { private static QName VERSIONS = new QName(HConstants.VERSIONS); private String name; - private Map attrs = new LinkedHashMap(); + private Map attrs = new LinkedHashMap<>(); /** * Default constructor diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index 0c5af3ce125..bcc1581c23c 100644 --- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -89,7 +89,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan // For properly formed JSON, if no properties, field has to be null (not just no elements). if(nd.getConfiguration().isEmpty()){ return; } - properties = new HashMap(); + properties = new HashMap<>(); properties.putAll(nd.getConfiguration()); } @@ -100,7 +100,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan */ public void addProperty(String key, String value) { if(properties == null){ - properties = new HashMap(); + properties = new HashMap<>(); } properties.put(key, value); } @@ -110,7 +110,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan */ public Map getProperties() { if(properties == null){ - properties = new HashMap(); + properties = new HashMap<>(); } return properties; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index aed80aafef1..4399b0b0b95 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -52,7 +52,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { @JsonProperty("Namespace") @XmlElement(name="Namespace") - private List namespaces = new ArrayList(); + private List namespaces = new ArrayList<>(); /** * Default constructor. Do not use. @@ -66,7 +66,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { */ public NamespacesModel(Admin admin) throws IOException { NamespaceDescriptor[] nds = admin.listNamespaceDescriptors(); - namespaces = new ArrayList(nds.length); + namespaces = new ArrayList<>(nds.length); for (NamespaceDescriptor nd : nds) { namespaces.add(nd.getName()); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index 398d5e1ce70..663c838345f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -64,7 +64,7 @@ public class RowModel implements ProtobufMessageHandler, Serializable { @JsonProperty("Cell") @XmlElement(name="Cell") - private List cells = new ArrayList(); + private List cells = new ArrayList<>(); /** @@ -86,7 +86,7 @@ public class RowModel implements ProtobufMessageHandler, Serializable { */ public RowModel(final byte[] key) { this.key = key; - cells = new ArrayList(); + cells = new ArrayList<>(); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 5c8d61850d9..2098c3d2696 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -109,14 +109,14 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { private byte[] startRow = HConstants.EMPTY_START_ROW; private byte[] endRow = HConstants.EMPTY_END_ROW;; - private List columns = new ArrayList(); + private List columns = new ArrayList<>(); 
private int batch = Integer.MAX_VALUE; private long startTime = 0; private long endTime = Long.MAX_VALUE; private String filter = null; private int maxVersions = Integer.MAX_VALUE; private int caching = -1; - private List labels = new ArrayList(); + private List labels = new ArrayList<>(); private boolean cacheBlocks = true; /** @@ -287,7 +287,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } break; case FilterList: this.op = ((FilterList)filter).getOperator().toString(); - this.filters = new ArrayList(); + this.filters = new ArrayList<>(); for (Filter child: ((FilterList)filter).getFilters()) { this.filters.add(new FilterModel(child)); } @@ -300,13 +300,13 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { Base64.encodeBytes(((InclusiveStopFilter)filter).getStopRowKey()); break; case MultipleColumnPrefixFilter: - this.prefixes = new ArrayList(); + this.prefixes = new ArrayList<>(); for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) { this.prefixes.add(Base64.encodeBytes(prefix)); } break; case MultiRowRangeFilter: - this.ranges = new ArrayList(); + this.ranges = new ArrayList<>(); for(RowRange range : ((MultiRowRangeFilter)filter).getRowRanges()) { this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), range.getStopRow(), range.isStopRowInclusive())); @@ -349,14 +349,14 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } } break; case SkipFilter: - this.filters = new ArrayList(); + this.filters = new ArrayList<>(); this.filters.add(new FilterModel(((SkipFilter)filter).getFilter())); break; case TimestampsFilter: this.timestamps = ((TimestampsFilter)filter).getTimestamps(); break; case WhileMatchFilter: - this.filters = new ArrayList(); + this.filters = new ArrayList<>(); this.filters.add( new FilterModel(((WhileMatchFilter)filter).getFilter())); break; @@ -391,7 +391,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { filter = new FamilyFilter(CompareOp.valueOf(op), comparator.build()); break; case FilterList: { - List list = new ArrayList(filters.size()); + List list = new ArrayList<>(filters.size()); for (FilterModel model: filters) { list.add(model.build()); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index c97f3e8f66b..3c3c50e2a40 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -373,7 +373,7 @@ public class StorageClusterStatusModel private long requests; private int heapSizeMB; private int maxHeapSizeMB; - private List regions = new ArrayList(); + private List regions = new ArrayList<>(); /** * Add a region name to the list @@ -505,8 +505,8 @@ public class StorageClusterStatusModel } } - private List liveNodes = new ArrayList(); - private List deadNodes = new ArrayList(); + private List liveNodes = new ArrayList<>(); + private List deadNodes = new ArrayList<>(); private int regions; private long requests; private double averageLoad; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java index 7336eb84ade..c1db1daa6d4 100644 --- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -53,7 +53,7 @@ public class TableInfoModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; private String name; - private List regions = new ArrayList(); + private List regions = new ArrayList<>(); /** * Default constructor diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java index cc043beda14..f7d9a4240e1 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -41,7 +41,7 @@ public class TableListModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; - private List tables = new ArrayList(); + private List tables = new ArrayList<>(); /** * Default constructor diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index 24fd09c38ec..a93a3ca13ef 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -74,8 +74,8 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { new QName(HColumnDescriptor.COMPRESSION); private String name; - private Map attrs = new LinkedHashMap(); - private List columns = new ArrayList(); + private Map attrs = new LinkedHashMap<>(); + private List columns = new ArrayList<>(); /** * Default constructor. diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index 0d29159615e..3559ee0f01b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -125,7 +125,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private TableName tableName = TABLE_NAME; protected HTableDescriptor TABLE_DESCRIPTOR; - protected Map commands = new TreeMap(); + protected Map commands = new TreeMap<>(); protected static Cluster cluster = new Cluster(); volatile Configuration conf; @@ -338,7 +338,7 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override public List getSplits(JobContext job) throws IOException { // generate splits - List splitList = new ArrayList(); + List splitList = new ArrayList<>(); for (FileStatus file: listStatus(job)) { if (file.isDirectory()) { @@ -601,7 +601,7 @@ public class PerformanceEvaluation extends Configured implements Tool { * @throws IOException */ private void doMultipleClients(final Class cmd) throws IOException { - final List threads = new ArrayList(this.N); + final List threads = new ArrayList<>(this.N); final long[] timings = new long[this.N]; final int perClientRows = R/N; final TableName tableName = this.tableName; @@ -724,7 +724,7 @@ public class PerformanceEvaluation extends Configured implements Tool { Path inputFile = new Path(inputDir, "input.txt"); PrintStream out = new PrintStream(fs.create(inputFile)); // Make input random. 
- Map m = new TreeMap(); + Map m = new TreeMap<>(); Hash h = MurmurHash.getInstance(); int perClientRows = (this.R / this.N); try { @@ -1039,7 +1039,7 @@ public class PerformanceEvaluation extends Configured implements Tool { protected Pair generateStartAndStopRows(int maxRange) { int start = this.rand.nextInt(Integer.MAX_VALUE) % totalRows; int stop = start + maxRange; - return new Pair(format(start), format(stop)); + return new Pair<>(format(start), format(stop)); } @Override diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java index d6eb1b3e212..0f2de447f2d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -143,7 +143,7 @@ public class TestGetAndPutResource extends RowResourceBase { assertEquals(response.getCode(), 200); checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap otherCells = new HashMap(); + HashMap otherCells = new HashMap<>(); otherCells.put(COLUMN_2,VALUE_3); // On Success update both the cells @@ -176,7 +176,7 @@ public class TestGetAndPutResource extends RowResourceBase { assertEquals(response.getCode(), 200); checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); - HashMap otherCells = new HashMap(); + HashMap otherCells = new HashMap<>(); otherCells.put(COLUMN_2,VALUE_3); // On Success update both the cells @@ -214,7 +214,7 @@ public class TestGetAndPutResource extends RowResourceBase { checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3); // Deletes the following columns based on Column1 check - HashMap cellsToDelete = new HashMap(); + HashMap cellsToDelete = new HashMap<>(); cellsToDelete.put(COLUMN_2,VALUE_2); // Value does not matter cellsToDelete.put(COLUMN_3,VALUE_3); // Value does not matter diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index 2ecba6a244f..2a0b4600ea2 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -82,7 +82,7 @@ public class TestMultiRowResource { @Parameterized.Parameters public static Collection data() { - List params = new ArrayList(2); + List params = new ArrayList<>(2); params.add(new Object[] {Boolean.TRUE}); params.add(new Object[] {Boolean.FALSE}); return params; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java index 2058f505717..58e8ea05328 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java @@ -61,13 +61,13 @@ import org.junit.experimental.categories.Category; @Category({RestTests.class, MediumTests.class}) public class TestNamespacesInstanceResource { private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; - private static Map NAMESPACE1_PROPS = new HashMap(); + private static Map NAMESPACE1_PROPS = new HashMap<>(); private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; - private static Map NAMESPACE2_PROPS = new HashMap(); + private static Map NAMESPACE2_PROPS = new HashMap<>(); private static String NAMESPACE3 = 
"TestNamespacesInstanceResource3"; - private static Map NAMESPACE3_PROPS = new HashMap(); + private static Map NAMESPACE3_PROPS = new HashMap<>(); private static String NAMESPACE4 = "TestNamespacesInstanceResource4"; - private static Map NAMESPACE4_PROPS = new HashMap(); + private static Map NAMESPACE4_PROPS = new HashMap<>(); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = @@ -189,7 +189,7 @@ public class TestNamespacesInstanceResource { table.addFamily(colDesc); admin.createTable(table); - Map nsProperties = new HashMap(); + Map nsProperties = new HashMap<>(); nsProperties.put("key1", "value1"); List nsTables = Arrays.asList("table1", "table2"); @@ -230,7 +230,7 @@ public class TestNamespacesInstanceResource { response = client.get(namespacePath, Constants.MIMETYPE_PROTOBUF); assertEquals(200, response.getCode()); - tablemodel.setTables(new ArrayList()); + tablemodel.setTables(new ArrayList<>()); tablemodel.getObjectFromMessage(response.getBody()); checkNamespaceTables(tablemodel.getTables(), nsTables); @@ -406,7 +406,7 @@ public class TestNamespacesInstanceResource { nd4 = findNamespace(admin, NAMESPACE4); assertNotNull(nd3); assertNotNull(nd4); - checkNamespaceProperties(nd3, new HashMap()); + checkNamespaceProperties(nd3, new HashMap<>()); checkNamespaceProperties(nd4, NAMESPACE4_PROPS); // Check cannot post tables that already exist. diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 083ddbe9848..6816e53f7dd 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -957,7 +957,7 @@ public class TestScannersWithFilters { // Test getting a single row, single key using Row, Qualifier, and Value // regular expression and substring filters // Use must pass all - List filters = new ArrayList(3); + List filters = new ArrayList<>(3); filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2"))); filters.add(new QualifierFilter(CompareOp.EQUAL, diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java index df920b1d582..f0c3d4a82d7 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java @@ -73,7 +73,7 @@ public class TestSchemaResource { @Parameterized.Parameters public static Collection data() { - List params = new ArrayList(2); + List params = new ArrayList<>(2); params.add(new Object[] {Boolean.TRUE}); params.add(new Object[] {Boolean.FALSE}); return params; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index 0310d9ff007..f35208aa111 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -262,7 +262,7 @@ public class TestRemoteTable { @Test public void testMultiGet() throws Exception { - ArrayList gets = new ArrayList(2); + ArrayList gets = new ArrayList<>(2); gets.add(new Get(ROW_1)); gets.add(new Get(ROW_2)); Result[] results = 
remoteTable.get(gets); @@ -272,7 +272,7 @@ public class TestRemoteTable { assertEquals(2, results[1].size()); //Test Versions - gets = new ArrayList(2); + gets = new ArrayList<>(2); Get g = new Get(ROW_1); g.setMaxVersions(3); gets.add(g); @@ -284,13 +284,13 @@ public class TestRemoteTable { assertEquals(3, results[1].size()); //404 - gets = new ArrayList(1); + gets = new ArrayList<>(1); gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); results = remoteTable.get(gets); assertNotNull(results); assertEquals(0, results.length); - gets = new ArrayList(3); + gets = new ArrayList<>(3); gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); gets.add(new Get(ROW_1)); gets.add(new Get(ROW_2)); @@ -314,7 +314,7 @@ public class TestRemoteTable { // multiput - List puts = new ArrayList(3); + List puts = new ArrayList<>(3); put = new Put(ROW_3); put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2); puts.add(put); @@ -408,7 +408,7 @@ public class TestRemoteTable { */ @Test public void testScanner() throws IOException { - List puts = new ArrayList(4); + List puts = new ArrayList<>(4); Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); puts.add(put); @@ -499,7 +499,7 @@ public class TestRemoteTable { */ @Test public void testIteratorScaner() throws IOException { - List puts = new ArrayList(4); + List puts = new ArrayList<>(4); Put put = new Put(ROW_1); put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); puts.add(put); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java index 0d4bbbdc7f5..3dee5cb722c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java @@ -30,7 +30,7 @@ import org.junit.experimental.categories.Category; @Category({RestTests.class, SmallTests.class}) public class TestNamespacesInstanceModel extends TestModelBase { - public static final Map NAMESPACE_PROPERTIES = new HashMap(); + public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); public static final String NAMESPACE_NAME = "namespaceName"; public TestNamespacesInstanceModel() throws Exception { diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java index 74e91fea981..dfec736b54c 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java @@ -183,4 +183,4 @@ class RSGroupAdminClient implements RSGroupAdmin { throw ProtobufUtil.handleRemoteException(e); } } -} \ No newline at end of file +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index 1f0be5ab5a9..811cf71a23d 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -82,7 +82,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { private void checkOnlineServersOnly(Set
servers) throws ConstraintException { // This uglyness is because we only have Address, not ServerName. // Online servers are keyed by ServerName. - Set onlineServers = new HashSet(); + Set
onlineServers = new HashSet<>(); for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { onlineServers.add(server.getAddress()); } @@ -114,7 +114,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { * @return List of Regions associated with this server. */ private List getRegions(final Address server) { - LinkedList regions = new LinkedList(); + LinkedList regions = new LinkedList<>(); for (Map.Entry el : master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { if (el.getValue().getAddress().equals(server)) { @@ -381,7 +381,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { } //We balance per group instead of per table - List plans = new ArrayList(); + List plans = new ArrayList<>(); for(Map.Entry>> tableMap: getRSGroupAssignmentsByTable(groupName).entrySet()) { LOG.info("Creating partial plan for table " + tableMap.getKey() + ": " diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index b36fd21f366..30efc0a83ec 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -120,7 +120,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } Map> correctedState = correctAssignments(clusterState); - List regionPlans = new ArrayList(); + List regionPlans = new ArrayList<>(); List misplacedRegions = correctedState.get(LoadBalancer.BOGUS_SERVER_NAME); for (HRegionInfo regionInfo : misplacedRegions) { @@ -129,10 +129,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { try { List rsgi = rsGroupInfoManager.listRSGroups(); for (RSGroupInfo info: rsgi) { - Map> groupClusterState = - new HashMap>(); - Map>> groupClusterLoad = - new HashMap>>(); + Map> groupClusterState = new HashMap<>(); + Map>> groupClusterLoad = new HashMap<>(); for (Address sName : info.getServers()) { for(ServerName curr: clusterState.keySet()) { if(curr.getAddress().equals(sName)) { @@ -180,7 +178,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { public Map> retainAssignment( Map regions, List servers) throws HBaseIOException { try { - Map> assignments = new TreeMap>(); + Map> assignments = new TreeMap<>(); ListMultimap groupToRegion = ArrayListMultimap.create(); Set misplacedRegions = getMisplacedRegions(regions); for (HRegionInfo region : regions.keySet()) { @@ -213,13 +211,13 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { candidateList); if (server != null) { if (!assignments.containsKey(server)) { - assignments.put(server, new ArrayList()); + assignments.put(server, new ArrayList<>()); } assignments.get(server).add(region); } else { //if not server is available assign to bogus so it ends up in RIT if(!assignments.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { - assignments.put(LoadBalancer.BOGUS_SERVER_NAME, new ArrayList()); + assignments.put(LoadBalancer.BOGUS_SERVER_NAME, new ArrayList<>()); } assignments.get(LoadBalancer.BOGUS_SERVER_NAME).add(region); } @@ -299,7 +297,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { private Set getMisplacedRegions( Map regions) throws IOException { - Set misplacedRegions = new HashSet(); + Set misplacedRegions = new HashSet<>(); for(Map.Entry region : regions.entrySet()) { HRegionInfo regionInfo = 
region.getKey(); ServerName assignedServer = region.getValue(); @@ -321,13 +319,12 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { private Map> correctAssignments( Map> existingAssignments){ - Map> correctAssignments = - new TreeMap>(); - List misplacedRegions = new LinkedList(); - correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList()); + Map> correctAssignments = new TreeMap<>(); + List misplacedRegions = new LinkedList<>(); + correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList<>()); for (Map.Entry> assignments : existingAssignments.entrySet()){ ServerName sName = assignments.getKey(); - correctAssignments.put(sName, new LinkedList()); + correctAssignments.put(sName, new LinkedList<>()); List regions = assignments.getValue(); for (HRegionInfo region : regions) { RSGroupInfo info = null; diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index b7940847f4a..83fe122e6b8 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -89,7 +89,7 @@ public class TestRSGroupBasedLoadBalancer { rand = new SecureRandom(); servers = generateServers(7); groupMap = constructGroupInfo(servers, groups); - tableMap = new HashMap(); + tableMap = new HashMap<>(); tableDescs = constructTableDesc(); Configuration conf = HBaseConfiguration.create(); conf.set("hbase.regions.slop", "0"); @@ -231,7 +231,7 @@ public class TestRSGroupBasedLoadBalancer { public void testRetainAssignment() throws Exception { // Test simple case where all same servers are there Map> currentAssignments = mockClusterServers(); - Map inputForTest = new HashMap(); + Map inputForTest = new HashMap<>(); for (ServerName sn : currentAssignments.keySet()) { for (HRegionInfo region : currentAssignments.get(sn)) { inputForTest.put(region, sn); @@ -264,8 +264,8 @@ public class TestRSGroupBasedLoadBalancer { Map> assignment) throws FileNotFoundException, IOException { // Verify condition 1, every region assigned, and to online server - Set onlineServerSet = new TreeSet(servers); - Set assignedRegions = new TreeSet(); + Set onlineServerSet = new TreeSet<>(servers); + Set assignedRegions = new TreeSet<>(); for (Map.Entry> a : assignment.entrySet()) { assertTrue( "Region assigned to server that was not listed as online", @@ -276,7 +276,7 @@ public class TestRSGroupBasedLoadBalancer { assertEquals(existing.size(), assignedRegions.size()); // Verify condition 2, every region must be assigned to correct server. - Set onlineHostNames = new TreeSet(); + Set onlineHostNames = new TreeSet<>(); for (ServerName s : servers) { onlineHostNames.add(s.getHostname()); } @@ -402,7 +402,7 @@ public class TestRSGroupBasedLoadBalancer { private Map> mockClusterServers() throws IOException { assertTrue(servers.size() == regionAssignment.length); - Map> assignment = new TreeMap>(); + Map> assignment = new TreeMap<>(); for (int i = 0; i < servers.size(); i++) { int numRegions = regionAssignment[i]; List regions = assignedRegions(numRegions, servers.get(i)); @@ -418,7 +418,7 @@ public class TestRSGroupBasedLoadBalancer { * @return List of HRegionInfo. 
*/ private List randomRegions(int numRegions) { - List regions = new ArrayList(numRegions); + List regions = new ArrayList<>(numRegions); byte[] start = new byte[16]; byte[] end = new byte[16]; rand.nextBytes(start); @@ -444,7 +444,7 @@ public class TestRSGroupBasedLoadBalancer { * @throws java.io.IOException Signals that an I/O exception has occurred. */ private List assignedRegions(int numRegions, ServerName sn) throws IOException { - List regions = new ArrayList(numRegions); + List regions = new ArrayList<>(numRegions); byte[] start = new byte[16]; byte[] end = new byte[16]; Bytes.putInt(start, 0, numRegions << 1); @@ -460,7 +460,7 @@ public class TestRSGroupBasedLoadBalancer { } private static List generateServers(int numServers) { - List servers = new ArrayList(numServers); + List servers = new ArrayList<>(numServers); for (int i = 0; i < numServers; i++) { String host = "server" + rand.nextInt(100000); int port = rand.nextInt(60000); @@ -481,7 +481,7 @@ public class TestRSGroupBasedLoadBalancer { assertTrue(servers != null); assertTrue(servers.size() >= groups.length); int index = 0; - Map groupMap = new HashMap(); + Map groupMap = new HashMap<>(); for (String grpName : groups) { RSGroupInfo RSGroupInfo = new RSGroupInfo(grpName); RSGroupInfo.addServer(servers.get(index).getAddress()); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java index 9096dfea750..5f9116b6407 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java @@ -95,7 +95,7 @@ public abstract class TestRSGroupsBase { assertTrue(defaultInfo.getServers().size() >= serverCount); rsGroupAdmin.addRSGroup(groupName); - Set
set = new HashSet(); + Set
set = new HashSet<>(); for(Address server: defaultInfo.getServers()) { if(set.size() == serverCount) { break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java index 37738633377..b0dfd42a387 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java @@ -104,8 +104,7 @@ public class HDFSBlocksDistribution { * Constructor */ public HDFSBlocksDistribution() { - this.hostAndWeights = - new TreeMap(); + this.hostAndWeights = new TreeMap<>(); } /** @@ -229,7 +228,7 @@ public class HDFSBlocksDistribution { */ public List getTopHosts() { HostAndWeight[] hostAndWeights = getTopHostsWithWeights(); - List topHosts = new ArrayList(hostAndWeights.length); + List topHosts = new ArrayList<>(hostAndWeights.length); for(HostAndWeight haw : hostAndWeights) { topHosts.add(haw.getHost()); } @@ -240,8 +239,7 @@ public class HDFSBlocksDistribution { * return the sorted list of hosts in terms of their weights */ public HostAndWeight[] getTopHostsWithWeights() { - NavigableSet orderedHosts = new TreeSet( - new HostAndWeight.WeightComparator()); + NavigableSet orderedHosts = new TreeSet<>(new HostAndWeight.WeightComparator()); orderedHosts.addAll(this.hostAndWeights.values()); return orderedHosts.descendingSet().toArray(new HostAndWeight[orderedHosts.size()]); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java index 530a323d04d..45e0f3aff9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java @@ -59,7 +59,7 @@ class HealthChecker { public void init(String location, long timeout) { this.healthCheckScript = location; this.scriptTimeout = timeout; - ArrayList execScript = new ArrayList(); + ArrayList execScript = new ArrayList<>(); execScript.add(healthCheckScript); this.shexec = new ShellCommandExecutor(execScript.toArray(new String[execScript.size()]), null, null, scriptTimeout); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java index 9265fb8d2ff..788d25bae6e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java @@ -101,7 +101,7 @@ public class JMXListener implements Coprocessor { + ",passwordFile:" + passwordFile + ",accessFile:" + accessFile); // Environment map - HashMap jmxEnv = new HashMap(); + HashMap jmxEnv = new HashMap<>(); RMIClientSocketFactory csf = null; RMIServerSocketFactory ssf = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 33fff97a95d..255ca31a6da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -61,10 +61,8 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; @InterfaceStability.Evolving public class LocalHBaseCluster { private static final Log LOG = LogFactory.getLog(LocalHBaseCluster.class); - private final List masterThreads = - new CopyOnWriteArrayList(); - private final List regionThreads = - new 
CopyOnWriteArrayList(); + private final List masterThreads = new CopyOnWriteArrayList<>(); + private final List regionThreads = new CopyOnWriteArrayList<>(); private final static int DEFAULT_NO = 1; /** local mode */ public static final String LOCAL = "local"; @@ -257,8 +255,7 @@ public class LocalHBaseCluster { * list). */ public List getLiveRegionServers() { - List liveServers = - new ArrayList(); + List liveServers = new ArrayList<>(); List list = getRegionServers(); for (JVMClusterUtil.RegionServerThread rst: list) { if (rst.isAlive()) liveServers.add(rst); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java index e1bc4ef84b3..d505d6fdade 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java @@ -25,7 +25,7 @@ public class SslRMIClientSocketFactorySecure extends SslRMIClientSocketFactory { @Override public Socket createSocket(String host, int port) throws IOException { SSLSocket socket = (SSLSocket) super.createSocket(host, port); - ArrayList secureProtocols = new ArrayList(); + ArrayList secureProtocols = new ArrayList<>(); for (String p : socket.getEnabledProtocols()) { if (!p.contains("SSLv3")) { secureProtocols.add(p); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java index bd9462521c0..8560ddc1ef0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java @@ -42,7 +42,7 @@ public class SslRMIServerSocketFactorySecure extends SslRMIServerSocketFactory { sslSocket.setUseClientMode(false); sslSocket.setNeedClientAuth(false); - ArrayList secureProtocols = new ArrayList(); + ArrayList secureProtocols = new ArrayList<>(); for (String p : sslSocket.getEnabledProtocols()) { if (!p.contains("SSLv3")) { secureProtocols.add(p); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java index 36df0026846..6ae96372176 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java @@ -55,7 +55,7 @@ public class ZKNamespaceManager extends ZooKeeperListener { public ZKNamespaceManager(ZooKeeperWatcher zkw) throws IOException { super(zkw); nsZNode = zkw.znodePaths.namespaceZNode; - cache = new ConcurrentSkipListMap(); + cache = new ConcurrentSkipListMap<>(); } public void start() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index ee328879a13..52185f19c57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -126,7 +126,7 @@ public class HFileArchiver { // otherwise, we attempt to archive the store files // build collection of just the store directories to archive - Collection toArchive = new ArrayList(); + Collection toArchive = new ArrayList<>(); final PathFilter dirFilter = new 
FSUtils.DirFilter(fs); PathFilter nonHidden = new PathFilter() { @Override @@ -324,7 +324,7 @@ public class HFileArchiver { if (LOG.isTraceEnabled()) LOG.trace("Created archive directory:" + baseArchiveDir); } - List failures = new ArrayList(); + List failures = new ArrayList<>(); String startTime = Long.toString(start); for (File file : toArchive) { // if its a file archive it @@ -475,7 +475,7 @@ public class HFileArchiver { private static void deleteStoreFilesWithoutArchiving(Collection compactedFiles) throws IOException { LOG.debug("Deleting store files without archiving."); - List errors = new ArrayList(0); + List errors = new ArrayList<>(0); for (StoreFile hsf : compactedFiles) { try { hsf.deleteReader(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java index 3258cbb1c42..3a1653417dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java @@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFactory; */ public class HFileArchiveTableMonitor { private static final Log LOG = LogFactory.getLog(HFileArchiveTableMonitor.class); - private final Set archivedTables = new TreeSet(); + private final Set archivedTables = new TreeSet<>(); /** * Set the tables to be archived. Internally adds each table and attempts to diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index dde2f100120..8ff118e37e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -61,7 +61,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { // create an internal region scanner this.scanner = region.getScanner(scan); - values = new ArrayList(); + values = new ArrayList<>(); if (scanMetrics == null) { initScanMetrics(scan); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 6a732612c15..051a8f26e91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -84,7 +84,7 @@ public final class HTableWrapper implements Table { } public void internalClose() throws IOException { - List exceptions = new ArrayList(2); + List exceptions = new ArrayList<>(2); try { table.close(); } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index 4601ae46ac9..49a718cca5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -127,7 +127,7 @@ public class TableSnapshotScanner extends AbstractClientScanner { final List restoredRegions = meta.getRegionsToAdd(); htd = meta.getTableDescriptor(); - regions = new ArrayList(restoredRegions.size()); + regions = new ArrayList<>(restoredRegions.size()); for (HRegionInfo hri: 
restoredRegions) { if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), hri.getEndKey())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index 9eaecd31614..f2176417be0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -48,7 +48,7 @@ public class ConstraintProcessor implements RegionObserver { private final ClassLoader classloader; - private List constraints = new ArrayList(); + private List constraints = new ArrayList<>(); /** * Create the constraint processor. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java index 09c935db1b2..5ed9aa845fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java @@ -120,7 +120,7 @@ public final class Constraints { disable(desc); // remove all the constraint settings - List keys = new ArrayList(); + List keys = new ArrayList<>(); // loop through all the key, values looking for constraints for (Map.Entry e : desc .getValues().entrySet()) { @@ -165,7 +165,7 @@ public final class Constraints { String key = serializeConstraintClass(clazz); String value = desc.getValue(key); - return value == null ? null : new Pair(key, value); + return value == null ? null : new Pair<>(key, value); } /** @@ -557,7 +557,7 @@ public final class Constraints { */ static List getConstraints(HTableDescriptor desc, ClassLoader classloader) throws IOException { - List constraints = new ArrayList(); + List constraints = new ArrayList<>(); // loop through all the key, values looking for constraints for (Map.Entry e : desc .getValues().entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index a226eb60d36..1654c672195 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -761,7 +761,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements if (taskOrRescanList == null || taskOrRescanList.isEmpty()) { return Collections. 
emptyList(); } - List taskList = new ArrayList(); + List taskList = new ArrayList<>(); for (String taskOrRescan : taskOrRescanList) { // Remove rescan nodes if (!ZKSplitLog.isRescanNode(taskOrRescan)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 2bf9d789202..70445bdcd67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -449,7 +449,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements if (!recoveringRegions.isEmpty()) { // Make a local copy to prevent ConcurrentModificationException when other threads // modify recoveringRegions - List tmpCopy = new ArrayList(recoveringRegions.keySet()); + List tmpCopy = new ArrayList<>(recoveringRegions.keySet()); int listSize = tmpCopy.size(); for (int i = 0; i < listSize; i++) { String region = tmpCopy.get(i); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 1d58bf9a941..bdface1c7d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -81,8 +81,7 @@ public abstract class CoprocessorHost { private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); protected Abortable abortable; /** Ordered set of loaded coprocessors with lock */ - protected SortedList coprocessors = - new SortedList(new EnvironmentPriorityComparator()); + protected SortedList coprocessors = new SortedList<>(new EnvironmentPriorityComparator()); protected Configuration conf; // unique file prefix to use for local copies of jars when classloading protected String pathPrefix; @@ -118,7 +117,7 @@ public abstract class CoprocessorHost { * to master). */ public Set getCoprocessors() { - Set returnValue = new TreeSet(); + Set returnValue = new TreeSet<>(); for (CoprocessorEnvironment e: coprocessors) { returnValue.add(e.getInstance().getClass().getSimpleName()); } @@ -318,7 +317,7 @@ public abstract class CoprocessorHost { * @return the list of coprocessors, or null if not found */ public List findCoprocessors(Class cls) { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); for (E env: coprocessors) { Coprocessor cp = env.getInstance(); @@ -338,7 +337,7 @@ public abstract class CoprocessorHost { * @return the list of CoprocessorEnvironment, or null if not found */ public List findCoprocessorEnvironment(Class cls) { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); for (E env: coprocessors) { Coprocessor cp = env.getInstance(); @@ -373,7 +372,7 @@ public abstract class CoprocessorHost { * @return A set of ClassLoader instances */ Set getExternalClassLoaders() { - Set externalClassLoaders = new HashSet(); + Set externalClassLoaders = new HashSet<>(); final ClassLoader systemClassLoader = this.getClass().getClassLoader(); for (E env : coprocessors) { ClassLoader cl = env.getInstance().getClass().getClassLoader(); @@ -664,7 +663,7 @@ public abstract class CoprocessorHost { * Used to limit legacy handling to once per Coprocessor class per classloader. 
*/ private static final Set> legacyWarning = - new ConcurrentSkipListSet>( + new ConcurrentSkipListSet<>( new Comparator>() { @Override public int compare(Class c1, Class c2) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index 834b54c65aa..3773fa64f55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -86,9 +86,9 @@ CoprocessorService, Coprocessor { MutateRowsResponse response = MutateRowsResponse.getDefaultInstance(); try { // set of rows to lock, sorted to avoid deadlocks - SortedSet rowsToLock = new TreeSet(Bytes.BYTES_COMPARATOR); + SortedSet rowsToLock = new TreeSet<>(Bytes.BYTES_COMPARATOR); List mutateRequestList = request.getMutationRequestList(); - List mutations = new ArrayList(mutateRequestList.size()); + List mutations = new ArrayList<>(mutateRequestList.size()); for (MutationProto m : mutateRequestList) { mutations.add(ProtobufUtil.toMutation(m)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java index 52f2b95a111..fc807680430 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java @@ -124,7 +124,7 @@ public class ObserverContext { public static ObserverContext createAndPrepare( T env, ObserverContext context) { if (context == null) { - context = new ObserverContext(RpcServer.getRequestUser()); + context = new ObserverContext<>(RpcServer.getRequestUser()); } context.prepare(env); return context; @@ -146,7 +146,7 @@ public class ObserverContext { public static ObserverContext createAndPrepare( T env, ObserverContext context, User user) { if (context == null) { - context = new ObserverContext(user); + context = new ObserverContext<>(user); } context.prepare(env); return context; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java index bfcf4863b2b..a00ccd9687a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java @@ -107,8 +107,7 @@ public class ForeignException extends IOException { // if there is no stack trace, ignore it and just return the message if (trace == null) return null; // build the stack trace for the message - List pbTrace = - new ArrayList(trace.length); + List pbTrace = new ArrayList<>(trace.length); for (StackTraceElement elem : trace) { StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder(); stackBuilder.setDeclaringClass(elem.getClassName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java index f5fc9793b00..f339e9eded9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java @@ 
-42,8 +42,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare { private static final Log LOG = LogFactory.getLog(ForeignExceptionDispatcher.class); protected final String name; - protected final List listeners = - new ArrayList(); + protected final List listeners = new ArrayList<>(); private ForeignException exception; public ForeignExceptionDispatcher(String name) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index 403244fca62..df7653fd0cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -59,8 +59,7 @@ public class ExecutorService { private static final Log LOG = LogFactory.getLog(ExecutorService.class); // hold the all the executors created in a map addressable by their names - private final ConcurrentHashMap executorMap = - new ConcurrentHashMap(); + private final ConcurrentHashMap executorMap = new ConcurrentHashMap<>(); // Name of the server hosting this executor service. private final String servername; @@ -164,7 +163,7 @@ public class ExecutorService { // the thread pool executor that services the requests final TrackingThreadPoolExecutor threadPoolExecutor; // work queue to use - unbounded queue - final BlockingQueue q = new LinkedBlockingQueue(); + final BlockingQueue q = new LinkedBlockingQueue<>(); private final String name; private static final AtomicLong seqids = new AtomicLong(0); private final long id; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java index 625d01fd885..48745caef2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java @@ -85,9 +85,9 @@ public class FavoredNodeAssignmentHelper { final RackManager rackManager) { this.servers = servers; this.rackManager = rackManager; - this.rackToRegionServerMap = new HashMap>(); - this.regionServerToRackMap = new HashMap(); - this.uniqueRackList = new ArrayList(); + this.rackToRegionServerMap = new HashMap<>(); + this.regionServerToRackMap = new HashMap<>(); + this.uniqueRackList = new ArrayList<>(); this.random = new Random(); } @@ -122,7 +122,7 @@ public class FavoredNodeAssignmentHelper { public static void updateMetaWithFavoredNodesInfo( Map> regionToFavoredNodes, Connection connection) throws IOException { - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue()); if (put != null) { @@ -142,7 +142,7 @@ public class FavoredNodeAssignmentHelper { public static void updateMetaWithFavoredNodesInfo( Map> regionToFavoredNodes, Configuration conf) throws IOException { - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (Map.Entry> entry : regionToFavoredNodes.entrySet()) { Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue()); if (put != null) { @@ -226,7 +226,7 @@ public class FavoredNodeAssignmentHelper { // The regions should be distributed proportionately to the racksizes void 
placePrimaryRSAsRoundRobin(Map> assignmentMap, Map primaryRSMap, List regions) { - List rackList = new ArrayList(rackToRegionServerMap.size()); + List rackList = new ArrayList<>(rackToRegionServerMap.size()); rackList.addAll(rackToRegionServerMap.keySet()); int rackIndex = random.nextInt(rackList.size()); int maxRackSize = 0; @@ -266,7 +266,7 @@ public class FavoredNodeAssignmentHelper { if (assignmentMap != null) { List regionsForServer = assignmentMap.get(currentServer); if (regionsForServer == null) { - regionsForServer = new ArrayList(); + regionsForServer = new ArrayList<>(); assignmentMap.put(currentServer, regionsForServer); } regionsForServer.add(regionInfo); @@ -284,8 +284,7 @@ public class FavoredNodeAssignmentHelper { Map placeSecondaryAndTertiaryRS( Map primaryRSMap) { - Map secondaryAndTertiaryMap = - new HashMap(); + Map secondaryAndTertiaryMap = new HashMap<>(); for (Map.Entry entry : primaryRSMap.entrySet()) { // Get the target region and its primary region server rack HRegionInfo regionInfo = entry.getKey(); @@ -317,12 +316,11 @@ public class FavoredNodeAssignmentHelper { private Map> mapRSToPrimaries( Map primaryRSMap) { - Map> primaryServerMap = - new HashMap>(); + Map> primaryServerMap = new HashMap<>(); for (Entry e : primaryRSMap.entrySet()) { Set currentSet = primaryServerMap.get(e.getValue()); if (currentSet == null) { - currentSet = new HashSet(); + currentSet = new HashSet<>(); } currentSet.add(e.getKey()); primaryServerMap.put(e.getValue(), currentSet); @@ -341,8 +339,7 @@ public class FavoredNodeAssignmentHelper { Map primaryRSMap) { Map> serverToPrimaries = mapRSToPrimaries(primaryRSMap); - Map secondaryAndTertiaryMap = - new HashMap(); + Map secondaryAndTertiaryMap = new HashMap<>(); for (Entry entry : primaryRSMap.entrySet()) { // Get the target region and its primary region server rack @@ -381,11 +378,11 @@ public class FavoredNodeAssignmentHelper { // Random to choose the secondary and tertiary region server // from another rack to place the secondary and tertiary // Random to choose one rack except for the current rack - Set rackSkipSet = new HashSet(); + Set rackSkipSet = new HashSet<>(); rackSkipSet.add(primaryRack); String secondaryRack = getOneRandomRack(rackSkipSet); List serverList = getServersFromRack(secondaryRack); - Set serverSet = new HashSet(); + Set serverSet = new HashSet<>(); serverSet.addAll(serverList); ServerName[] favoredNodes; if (serverList.size() >= 2) { @@ -393,7 +390,7 @@ public class FavoredNodeAssignmentHelper { // Skip the secondary for the tertiary placement // skip the servers which share the primary already Set primaries = serverToPrimaries.get(primaryRS); - Set skipServerSet = new HashSet(); + Set skipServerSet = new HashSet<>(); while (true) { ServerName[] secondaryAndTertiary = null; if (primaries.size() > 1) { @@ -423,7 +420,7 @@ public class FavoredNodeAssignmentHelper { } secondaryRack = getOneRandomRack(rackSkipSet); serverList = getServersFromRack(secondaryRack); - serverSet = new HashSet(); + serverSet = new HashSet<>(); serverSet.addAll(serverList); } @@ -452,7 +449,7 @@ public class FavoredNodeAssignmentHelper { // Pick the tertiary if (getTotalNumberOfRacks() == 2) { // Pick the tertiary from the same rack of the primary RS - Set serverSkipSet = new HashSet(); + Set serverSkipSet = new HashSet<>(); serverSkipSet.add(primaryRS); favoredNodes[1] = getOneRandomServer(primaryRack, serverSkipSet); } else { @@ -478,7 +475,7 @@ public class FavoredNodeAssignmentHelper { } else { // Randomly select two region servers from 
the server list and make sure // they are not overlap with the primary region server; - Set serverSkipSet = new HashSet(); + Set serverSkipSet = new HashSet<>(); serverSkipSet.add(primaryRS); // Place the secondary RS diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index f0af0d0aa5d..6e7bf0e4f5d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -87,7 +87,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored @Override public List balanceCluster(Map> clusterState) { //TODO. Look at is whether Stochastic loadbalancer can be integrated with this - List plans = new ArrayList(); + List plans = new ArrayList<>(); //perform a scan of the meta to get the latest updates (if any) SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment = new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection()); @@ -97,10 +97,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored LOG.warn("Not running balancer since exception was thrown " + ie); return plans; } - Map serverNameToServerNameWithoutCode = - new HashMap(); - Map serverNameWithoutCodeToServerName = - new HashMap(); + Map serverNameToServerNameWithoutCode = new HashMap<>(); + Map serverNameWithoutCodeToServerName = new HashMap<>(); ServerManager serverMgr = super.services.getServerManager(); for (ServerName sn: serverMgr.getOnlineServersList()) { ServerName s = ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE); @@ -189,7 +187,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); List regionsWithNoFavoredNodes = segregatedRegions.getSecond(); - assignmentMap = new HashMap>(); + assignmentMap = new HashMap<>(); roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes, servers); // merge the assignment maps @@ -225,9 +223,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } } } - List regions = new ArrayList(1); + List regions = new ArrayList<>(1); regions.add(regionInfo); - Map primaryRSMap = new HashMap(1); + Map primaryRSMap = new HashMap<>(1); primaryRSMap.put(regionInfo, primary); assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); return primary; @@ -241,9 +239,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored private Pair>, List> segregateRegionsAndAssignRegionsWithFavoredNodes(List regions, List availableServers) { - Map> assignmentMapForFavoredNodes = - new HashMap>(regions.size() / 2); - List regionsWithNoFavoredNodes = new ArrayList(regions.size()/2); + Map> assignmentMapForFavoredNodes = new HashMap<>(regions.size() / 2); + List regionsWithNoFavoredNodes = new ArrayList<>(regions.size()/2); for (HRegionInfo region : regions) { List favoredNodes = fnm.getFavoredNodes(region); ServerName primaryHost = null; @@ -272,8 +269,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored regionsWithNoFavoredNodes.add(region); } } - return new Pair>, List>( - assignmentMapForFavoredNodes, regionsWithNoFavoredNodes); + return new 
Pair<>(assignmentMapForFavoredNodes, regionsWithNoFavoredNodes); } // Do a check of the hostname and port and return the servername from the servers list @@ -316,7 +312,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored HRegionInfo region, ServerName host) { List regionsOnServer = null; if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) { - regionsOnServer = new ArrayList(); + regionsOnServer = new ArrayList<>(); assignmentMapForFavoredNodes.put(host, regionsOnServer); } regionsOnServer.add(region); @@ -329,7 +325,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper, Map> assignmentMap, List regions, List servers) throws IOException { - Map primaryRSMap = new HashMap(); + Map primaryRSMap = new HashMap<>(); // figure the primary RSs assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); @@ -347,7 +343,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored for (HRegionInfo region : regions) { // Store the favored nodes without startCode for the ServerName objects // We don't care about the startcode; but only the hostname really - List favoredNodesForRegion = new ArrayList(3); + List favoredNodesForRegion = new ArrayList<>(3); ServerName sn = primaryRSMap.get(region); favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java index f24d9fcc3b4..ff6d9e16f2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java @@ -46,7 +46,7 @@ public class FavoredNodesPlan { } public FavoredNodesPlan() { - favoredNodesMap = new ConcurrentHashMap>(); + favoredNodesMap = new ConcurrentHashMap<>(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java index de53bd9952c..cfc0640dadb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java @@ -38,7 +38,7 @@ public class HttpRequestLog { private static final HashMap serverToComponent; static { - serverToComponent = new HashMap(); + serverToComponent = new HashMap<>(); serverToComponent.put("master", "master"); serverToComponent.put("region", "regionserver"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index 3ce2f09031b..c7e11537cef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -163,9 +163,8 @@ public class HttpServer implements FilterContainer { protected final WebAppContext webAppContext; protected final boolean findPort; - protected final Map defaultContexts = - new HashMap(); - protected final List filterNames = new ArrayList(); + protected final Map defaultContexts = new HashMap<>(); + protected final List filterNames = new ArrayList<>(); static 
final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; @@ -555,7 +554,7 @@ public class HttpServer implements FilterContainer { addDefaultApps(contexts, appDir, conf); addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); - Map params = new HashMap(); + Map params = new HashMap<>(); params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode", "DENY")); addGlobalFilter("clickjackingprevention", ClickjackingPreventionFilter.class.getName(), params); @@ -906,7 +905,7 @@ public class HttpServer implements FilterContainer { private void initSpnego(Configuration conf, String hostName, String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey, String signatureSecretKeyFileKey) throws IOException { - Map params = new HashMap(); + Map params = new HashMap<>(); String principalInConf = getOrEmptyString(conf, usernameConfKey); if (!principalInConf.isEmpty()) { params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal( @@ -1302,7 +1301,7 @@ public class HttpServer implements FilterContainer { @Override public Map getParameterMap() { - Map result = new HashMap(); + Map result = new HashMap<>(); Map raw = rawRequest.getParameterMap(); for (Map.Entry item: raw.entrySet()) { String[] rawValue = item.getValue(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java index 710676d9fc5..7c3204b2395 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java @@ -124,7 +124,7 @@ public class StaticUserWebFilter extends FilterInitializer { @Override public void initFilter(FilterContainer container, Configuration conf) { - HashMap options = new HashMap(); + HashMap options = new HashMap<>(); String username = getUsernameFromConf(conf); options.put(HBASE_HTTP_STATIC_USER, username); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index 3caf67f7f01..ca0dfbc8e77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -426,7 +426,7 @@ public class FileLink { protected void setLocations(Path originPath, Path... 
alternativePaths) { assert this.locations == null : "Link locations already set"; - List paths = new ArrayList(alternativePaths.length +1); + List paths = new ArrayList<>(alternativePaths.length +1); if (originPath != null) { paths.add(originPath); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index 5128662a21d..cdc5be167b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -509,7 +509,7 @@ public class HFileLink extends FileLink { String tableSubstr = name.substring(separatorIndex + 1) .replace('=', TableName.NAMESPACE_DELIM); TableName linkTableName = TableName.valueOf(tableSubstr); - return new Pair(linkTableName, linkRegionName); + return new Pair<>(linkTableName, linkRegionName); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java index a6ee6da7773..c64cdf7440e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java @@ -494,7 +494,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { * @return A CompletableFuture that hold the acked length after flushing. */ public CompletableFuture flush(boolean syncBlock) { - CompletableFuture future = new CompletableFuture(); + CompletableFuture future = new CompletableFuture<>(); if (eventLoop.inEventLoop()) { flush0(future, syncBlock); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 875ff77fa9e..3eaacc42f7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -682,8 +682,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper { try { stat = namenode.create(src, FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName, - new EnumSetWritable( - overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)), + new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)), createParent, replication, blockSize, CryptoProtocolVersion.supported()); } catch (Exception e) { if (e instanceof RemoteException) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index 9335ef6ca9c..5c306c093ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -205,8 +205,7 @@ public class BlockCacheUtil { /** * Map by filename. use concurent utils because we want our Map and contained blocks sorted. 
 */
-  private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
-      new ConcurrentSkipListMap<String, NavigableSet<CachedBlock>>();
+  private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile = new ConcurrentSkipListMap<>();
   FastLongHistogram hist = new FastLongHistogram();
 
   /**
@@ -217,7 +216,7 @@ public class BlockCacheUtil {
     if (isFull()) return true;
     NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
     if (set == null) {
-      set = new ConcurrentSkipListSet<CachedBlock>();
+      set = new ConcurrentSkipListSet<>();
       this.cachedBlockByFile.put(cb.getFilename(), set);
     }
     set.add(cb);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
index 5d2d54ae032..3140150e640 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java
@@ -30,8 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public class CacheableDeserializerIdManager {
-  private static final Map<Integer, CacheableDeserializer<Cacheable>> registeredDeserializers =
-      new HashMap<Integer, CacheableDeserializer<Cacheable>>();
+  private static final Map<Integer, CacheableDeserializer<Cacheable>> registeredDeserializers = new HashMap<>();
   private static final AtomicInteger identifier = new AtomicInteger(0);
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
index a50566ac476..96dfcbdce68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java
@@ -70,7 +70,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase
     BloomFilterChunk chunk;
   }
 
-  private Queue<ReadyChunk> readyChunks = new LinkedList<ReadyChunk>();
+  private Queue<ReadyChunk> readyChunks = new LinkedList<>();
 
   /** The first key in the current Bloom filter chunk.
*/ private byte[] firstKeyInChunk = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 0e07d6ebc2e..c5b334a294c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -638,7 +638,7 @@ public class HFile { static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR"); static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED"); public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN"); - private final SortedMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + private final SortedMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); public FileInfo() { super(); @@ -894,7 +894,7 @@ public class HFile { */ static List getStoreFiles(FileSystem fs, Path regionDir) throws IOException { - List regionHFiles = new ArrayList(); + List regionHFiles = new ArrayList<>(); PathFilter dirFilter = new FSUtils.DirFilter(fs); FileStatus[] familyDirs = fs.listStatus(regionDir, dirFilter); for(FileStatus dir : familyDirs) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 1970aded364..fba15baa097 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -1406,8 +1406,7 @@ public class HFileBlock implements Cacheable { * next blocks header seems unnecessary given we usually get the block size * from the hfile index. Review! */ - private AtomicReference prefetchedHeader = - new AtomicReference(new PrefetchedHeader()); + private AtomicReference prefetchedHeader = new AtomicReference<>(new PrefetchedHeader()); /** The size of the file we are reading from, or -1 if unknown. */ protected long fileSize; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 575c074d2bd..b36c292e797 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -239,7 +239,7 @@ public class HFileBlockIndex { private Cell[] blockKeys; /** Pre-computed mid-key */ - private AtomicReference midKey = new AtomicReference(); + private AtomicReference midKey = new AtomicReference<>(); /** Needed doing lookup on blocks. */ private CellComparator comparator; @@ -741,7 +741,7 @@ public class HFileBlockIndex { // keys[numEntries] = Infinity, then we are maintaining an invariant that // keys[low - 1] < key < keys[high + 1] while narrowing down the range. ByteBufferKeyOnlyKeyValue nonRootIndexkeyOnlyKV = new ByteBufferKeyOnlyKeyValue(); - ObjectIntPair pair = new ObjectIntPair(); + ObjectIntPair pair = new ObjectIntPair<>(); while (low <= high) { mid = (low + high) >>> 1; @@ -1402,20 +1402,20 @@ public class HFileBlockIndex { static class BlockIndexChunk { /** First keys of the key range corresponding to each index entry. */ - private final List blockKeys = new ArrayList(); + private final List blockKeys = new ArrayList<>(); /** Block offset in backing stream. 
*/ - private final List blockOffsets = new ArrayList(); + private final List blockOffsets = new ArrayList<>(); /** On-disk data sizes of lower-level data or index blocks. */ - private final List onDiskDataSizes = new ArrayList(); + private final List onDiskDataSizes = new ArrayList<>(); /** * The cumulative number of sub-entries, i.e. entries on deeper-level block * index entries. numSubEntriesAt[i] is the number of sub-entries in the * blocks corresponding to this chunk's entries #0 through #i inclusively. */ - private final List numSubEntriesAt = new ArrayList(); + private final List numSubEntriesAt = new ArrayList<>(); /** * The offset of the next entry to be added, relative to the end of the @@ -1434,8 +1434,7 @@ public class HFileBlockIndex { * records in a "non-root" format block. These offsets are relative to the * end of this secondary index. */ - private final List secondaryIndexOffsetMarks = - new ArrayList(); + private final List secondaryIndexOffsetMarks = new ArrayList<>(); /** * Adds a new entry to this block index chunk. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 1710379b171..030a25e0507 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -122,7 +122,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { */ private byte[] row = null; - private List files = new ArrayList(); + private List files = new ArrayList<>(); private int count; private static final String FOUR_SPACES = " "; @@ -232,7 +232,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { if (verbose) { System.out.println("checkMobIntegrity is enabled"); } - mobFileLocations = new HashMap>(); + mobFileLocations = new HashMap<>(); } cmd.getArgList().forEach((file) -> files.add(new Path(file))); @@ -372,8 +372,8 @@ public class HFilePrettyPrinter extends Configured implements Tool { HFileScanner scanner, byte[] row) throws IOException { Cell pCell = null; FileSystem fs = FileSystem.get(getConf()); - Set foundMobFiles = new LinkedHashSet(FOUND_MOB_FILES_CACHE_CAPACITY); - Set missingMobFiles = new LinkedHashSet(MISSING_MOB_FILES_CACHE_CAPACITY); + Set foundMobFiles = new LinkedHashSet<>(FOUND_MOB_FILES_CACHE_CAPACITY); + Set missingMobFiles = new LinkedHashSet<>(MISSING_MOB_FILES_CACHE_CAPACITY); do { Cell cell = scanner.getCell(); if (row != null && row.length != 0) { @@ -469,7 +469,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { String tableName = tn.getNameAsString(); List locations = mobFileLocations.get(tableName); if (locations == null) { - locations = new ArrayList(2); + locations = new ArrayList<>(2); locations.add(MobUtils.getMobFamilyPath(getConf(), tn, family)); locations.add(HFileArchiveUtil.getStoreArchivePath(getConf(), tn, MobUtils.getMobRegionInfo(tn).getEncodedName(), family)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index c92d77d8e4e..4e8cbaa0329 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -138,7 +138,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { * Blocks read 
from the load-on-open section, excluding data root index, meta * index, and file info. */ - private List loadOnOpenBlocks = new ArrayList(); + private List loadOnOpenBlocks = new ArrayList<>(); /** Minimum minor version supported by this HFile format */ static final int MIN_MINOR_VERSION = 0; @@ -493,7 +493,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // buffer backed keyonlyKV private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue(); // A pair for reusing in blockSeek() so that we don't garbage lot of objects - final ObjectIntPair pair = new ObjectIntPair(); + final ObjectIntPair pair = new ObjectIntPair<>(); /** * The next indexed key is to keep track of the indexed key of the next data block. @@ -506,7 +506,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // Current block being used protected HFileBlock curBlock; // Previous blocks that were used in the course of the read - protected final ArrayList prevBlocks = new ArrayList(); + protected final ArrayList prevBlocks = new ArrayList<>(); public HFileScannerImpl(final HFile.Reader reader, final boolean cacheBlocks, final boolean pread, final boolean isCompaction) { @@ -975,7 +975,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { public Cell getKey() { assertSeeked(); // Create a new object so that this getKey is cached as firstKey, lastKey - ObjectIntPair keyPair = new ObjectIntPair(); + ObjectIntPair keyPair = new ObjectIntPair<>(); blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen, keyPair); ByteBuffer keyBuf = keyPair.getFirst(); if (keyBuf.hasArray()) { @@ -996,7 +996,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { public ByteBuffer getValue() { assertSeeked(); // Okie to create new Pair. Not used in hot path - ObjectIntPair valuePair = new ObjectIntPair(); + ObjectIntPair valuePair = new ObjectIntPair<>(); this.blockBuffer.asSubByteBuffer(blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen, currValueLen, valuePair); ByteBuffer valBuf = valuePair.getFirst().duplicate(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 8a2d2381c87..6a20b99c0e0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -91,10 +91,10 @@ public class HFileWriterImpl implements HFile.Writer { protected final CellComparator comparator; /** Meta block names. */ - protected List metaNames = new ArrayList(); + protected List metaNames = new ArrayList<>(); /** {@link Writable}s representing meta block data. */ - protected List metaData = new ArrayList(); + protected List metaData = new ArrayList<>(); /** * First cell in a block. @@ -132,7 +132,7 @@ public class HFileWriterImpl implements HFile.Writer { public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; /** Inline block writers for multi-level block index and compound Blooms. */ - private List inlineBlockWriters = new ArrayList(); + private List inlineBlockWriters = new ArrayList<>(); /** block writer */ protected HFileBlock.Writer blockWriter; @@ -153,7 +153,7 @@ public class HFileWriterImpl implements HFile.Writer { private Cell lastCellOfPreviousBlock = null; /** Additional data items to be written to the "load-on-open" section. 
*/ - private List additionalLoadOnOpenData = new ArrayList(); + private List additionalLoadOnOpenData = new ArrayList<>(); protected long maxMemstoreTS = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index 61deef5f179..838fa418898 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -42,8 +42,7 @@ public class PrefetchExecutor { private static final Log LOG = LogFactory.getLog(PrefetchExecutor.class); /** Futures for tracking block prefetch activity */ - private static final Map> prefetchFutures = - new ConcurrentSkipListMap>(); + private static final Map> prefetchFutures = new ConcurrentSkipListMap<>(); /** Executor pool shared among all HFiles for block prefetch */ private static final ScheduledExecutorService prefetchExecutorPool; /** Delay before beginning prefetch */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 1bcdfc44719..cb23ca91268 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -142,8 +142,7 @@ public class BucketCache implements BlockCache, HeapSize { * to the BucketCache. It then updates the ramCache and backingMap accordingly. */ @VisibleForTesting - final ArrayList> writerQueues = - new ArrayList>(); + final ArrayList> writerQueues = new ArrayList<>(); @VisibleForTesting final WriterThread[] writerThreads; @@ -151,7 +150,7 @@ public class BucketCache implements BlockCache, HeapSize { private volatile boolean freeInProgress = false; private final Lock freeSpaceLock = new ReentrantLock(); - private UniqueIndexMap deserialiserMap = new UniqueIndexMap(); + private UniqueIndexMap deserialiserMap = new UniqueIndexMap<>(); private final AtomicLong realCacheSize = new AtomicLong(0); private final AtomicLong heapSize = new AtomicLong(0); @@ -191,7 +190,7 @@ public class BucketCache implements BlockCache, HeapSize { final IdReadWriteLock offsetLock = new IdReadWriteLock(); private final NavigableSet blocksByHFile = - new ConcurrentSkipListSet(new Comparator() { + new ConcurrentSkipListSet<>(new Comparator() { @Override public int compare(BlockCacheKey a, BlockCacheKey b) { int nameComparison = a.getHfileName().compareTo(b.getHfileName()); @@ -240,13 +239,13 @@ public class BucketCache implements BlockCache, HeapSize { bucketAllocator = new BucketAllocator(capacity, bucketSizes); for (int i = 0; i < writerThreads.length; ++i) { - writerQueues.add(new ArrayBlockingQueue(writerQLen)); + writerQueues.add(new ArrayBlockingQueue<>(writerQLen)); } assert writerQueues.size() == writerThreads.length; - this.ramCache = new ConcurrentHashMap(); + this.ramCache = new ConcurrentHashMap<>(); - this.backingMap = new ConcurrentHashMap((int) blockNumCapacity); + this.backingMap = new ConcurrentHashMap<>((int) blockNumCapacity); if (ioEngine.isPersistent() && persistencePath != null) { try { @@ -756,7 +755,7 @@ public class BucketCache implements BlockCache, HeapSize { } } - PriorityQueue bucketQueue = new PriorityQueue(3); + PriorityQueue bucketQueue = new PriorityQueue<>(3); bucketQueue.add(bucketSingle); bucketQueue.add(bucketMulti); @@ -841,7 +840,7 @@ 
public class BucketCache implements BlockCache, HeapSize { } public void run() { - List entries = new ArrayList(); + List entries = new ArrayList<>(); try { while (cacheEnabled && writerEnabled) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java index 9a72c4eeed7..a3003c9602c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/UniqueIndexMap.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public final class UniqueIndexMap implements Serializable { private static final long serialVersionUID = -1145635738654002342L; - ConcurrentHashMap mForwardMap = new ConcurrentHashMap(); - ConcurrentHashMap mReverseMap = new ConcurrentHashMap(); + ConcurrentHashMap mForwardMap = new ConcurrentHashMap<>(); + ConcurrentHashMap mReverseMap = new ConcurrentHashMap<>(); AtomicInteger mIndex = new AtomicInteger(0); // Map a length to an index. If we can't, allocate a new mapping. We might diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java index 100f751a695..cf99f8bd62c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/util/MemorySizeUtil.java @@ -169,7 +169,7 @@ public class MemorySizeUtil { if (MemStoreLAB.isEnabled(conf)) { // We are in offheap Memstore use long globalMemStoreLimit = (long) (offheapMSGlobal * 1024 * 1024); // Size in bytes - return new Pair(globalMemStoreLimit, MemoryType.NON_HEAP); + return new Pair<>(globalMemStoreLimit, MemoryType.NON_HEAP); } else { // Off heap max memstore size is configured with turning off MSLAB. It makes no sense. Do a // warn log and go with on heap memstore percentage. By default it will be 40% of Xmx @@ -178,7 +178,7 @@ public class MemorySizeUtil { + " Going with on heap global memstore size ('" + MEMSTORE_SIZE_KEY + "')"); } } - return new Pair(getOnheapGlobalMemstoreSize(conf), MemoryType.HEAP); + return new Pair<>(getOnheapGlobalMemstoreSize(conf), MemoryType.HEAP); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index a9b6fd13399..4ebfcd913ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -60,7 +60,7 @@ public class FifoRpcScheduler extends RpcScheduler { handlerCount, 60, TimeUnit.SECONDS, - new ArrayBlockingQueue(maxQueueLength), + new ArrayBlockingQueue<>(maxQueueLength), new DaemonThreadFactory("FifoRpcScheduler.handler"), new ThreadPoolExecutor.CallerRunsPolicy()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 7813bf4191b..4b0c9749b4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -145,12 +145,10 @@ public abstract class RpcServer implements RpcServerInterface, /** This is set to Call object before Handler invokes an RPC and ybdie * after the call returns. 
*/ - protected static final ThreadLocal CurCall = - new ThreadLocal(); + protected static final ThreadLocal CurCall = new ThreadLocal<>(); /** Keeps MonitoredRPCHandler per handler thread. */ - protected static final ThreadLocal MONITORED_RPC - = new ThreadLocal(); + protected static final ThreadLocal MONITORED_RPC = new ThreadLocal<>(); protected final InetSocketAddress bindAddress; @@ -413,7 +411,7 @@ public abstract class RpcServer implements RpcServerInterface, this.connection.compressionCodec, cells); if (b != null) { cellBlockSize = b.remaining(); - cellBlock = new ArrayList(1); + cellBlock = new ArrayList<>(1); cellBlock.add(b); } } @@ -1177,7 +1175,7 @@ public abstract class RpcServer implements RpcServerInterface, status.getClient(), startTime, processingTime, qTime, responseSize); } - return new Pair(result, controller.cellScanner()); + return new Pair<>(result, controller.cellScanner()); } catch (Throwable e) { // The above callBlockingMethod will always return a SE. Strip the SE wrapper before // putting it on the wire. Its needed to adhere to the pb Service Interface but we don't @@ -1218,7 +1216,7 @@ public abstract class RpcServer implements RpcServerInterface, String clientAddress, long startTime, int processingTime, int qTime, long responseSize) throws IOException { // base information that is reported regardless of type of call - Map responseInfo = new HashMap(); + Map responseInfo = new HashMap<>(); responseInfo.put("starttimems", startTime); responseInfo.put("processingtimems", processingTime); responseInfo.put("queuetimems", qTime); @@ -1299,7 +1297,7 @@ public abstract class RpcServer implements RpcServerInterface, static Pair allocateByteBuffToReadInto(ByteBufferPool pool, int minSizeForPoolUse, int reqLen) { ByteBuff resultBuf; - List bbs = new ArrayList((reqLen / pool.getBufferSize()) + 1); + List bbs = new ArrayList<>((reqLen / pool.getBufferSize()) + 1); int remain = reqLen; ByteBuffer buf = null; while (remain >= minSizeForPoolUse && (buf = pool.getBuffer()) != null) { @@ -1325,14 +1323,14 @@ public abstract class RpcServer implements RpcServerInterface, resultBuf.limit(reqLen); if (bufsFromPool != null) { final ByteBuffer[] bufsFromPoolFinal = bufsFromPool; - return new Pair(resultBuf, () -> { + return new Pair<>(resultBuf, () -> { // Return back all the BBs to pool for (int i = 0; i < bufsFromPoolFinal.length; i++) { pool.putbackBuffer(bufsFromPoolFinal[i]); } }); } - return new Pair(resultBuf, null); + return new Pair<>(resultBuf, null); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 075d8b84c2d..9e1e81e85ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -259,8 +259,7 @@ public class SimpleRpcServer extends RpcServer { private final Selector readSelector; Reader() throws IOException { - this.pendingConnections = - new LinkedBlockingQueue(readerPendingConnectionQueueLength); + this.pendingConnections = new LinkedBlockingQueue<>(readerPendingConnectionQueueLength); this.readSelector = Selector.open(); } @@ -603,7 +602,7 @@ public class SimpleRpcServer extends RpcServer { return lastPurgeTime; } - ArrayList conWithOldCalls = new ArrayList(); + ArrayList conWithOldCalls = new ArrayList<>(); // get the list of channels from list of keys. 
synchronized (writeSelector.keys()) { for (SelectionKey key : writeSelector.keys()) { @@ -763,7 +762,7 @@ public class SimpleRpcServer extends RpcServer { protected SocketChannel channel; private ByteBuff data; private ByteBuffer dataLengthBuffer; - protected final ConcurrentLinkedDeque responseQueue = new ConcurrentLinkedDeque(); + protected final ConcurrentLinkedDeque responseQueue = new ConcurrentLinkedDeque<>(); private final Lock responseWriteLock = new ReentrantLock(); private LongAdder rpcCount = new LongAdder(); // number of outstanding rpcs private long lastContact; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index ee6da759af5..e1ca999f1bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -116,7 +116,7 @@ implements TableMap { */ protected byte[][] extractKeyValues(Result r) { byte[][] keyVals = null; - ArrayList foundList = new ArrayList(); + ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { for (Cell value: r.listCells()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index 819ef57b7e2..8f0504a0192 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -120,7 +120,7 @@ public class CopyTable extends Configured implements Tool { if(families != null) { String[] fams = families.split(","); - Map cfRenameMap = new HashMap(); + Map cfRenameMap = new HashMap<>(); for(String fam : fams) { String sourceCf; if(fam.contains(":")) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 10e34d2c259..004ee5c6ac6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -53,7 +53,7 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression private static final Log LOG = LogFactory.getLog(DefaultVisibilityExpressionResolver.class); private Configuration conf; - private final Map labels = new HashMap(); + private final Map labels = new HashMap<>(); @Override public Configuration getConf() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index 8a9fa495324..44e43c8401a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -106,7 +106,7 @@ extends TableMapper implements Configurable { */ protected byte[][] extractKeyValues(Result r) { byte[][] keyVals = null; - ArrayList foundList = new ArrayList(); + ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { for (Cell value: r.listCells()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 13ea5c583a1..1ce5f60fdb9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -137,7 +137,7 @@ public class HFileOutputFormat2 static RecordWriter createRecordWriter(final TaskAttemptContext context) throws IOException { - return new HFileRecordWriter(context, null); + return new HFileRecordWriter<>(context, null); } protected static class HFileRecordWriter @@ -211,7 +211,7 @@ public class HFileOutputFormat2 overriddenEncoding = null; } - writers = new TreeMap(Bytes.BYTES_COMPARATOR); + writers = new TreeMap<>(Bytes.BYTES_COMPARATOR); previousRow = HConstants.EMPTY_BYTE_ARRAY; now = Bytes.toBytes(EnvironmentEdgeManager.currentTime()); rollRequested = false; @@ -418,8 +418,7 @@ public class HFileOutputFormat2 private static List getRegionStartKeys(RegionLocator table) throws IOException { byte[][] byteKeys = table.getStartKeys(); - ArrayList ret = - new ArrayList(byteKeys.length); + ArrayList ret = new ArrayList<>(byteKeys.length); for (byte[] byteKey : byteKeys) { ret.add(new ImmutableBytesWritable(byteKey)); } @@ -442,8 +441,7 @@ public class HFileOutputFormat2 // have keys < the first region (which has an empty start key) // so we need to remove it. Otherwise we would end up with an // empty reducer with index 0 - TreeSet sorted = - new TreeSet(startKeys); + TreeSet sorted = new TreeSet<>(startKeys); ImmutableBytesWritable first = sorted.first(); if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) { @@ -587,8 +585,7 @@ public class HFileOutputFormat2 conf) { Map stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); - Map compressionMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); compressionMap.put(e.getKey(), algorithm); @@ -607,8 +604,7 @@ public class HFileOutputFormat2 static Map createFamilyBloomTypeMap(Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); - Map bloomTypeMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { BloomType bloomType = BloomType.valueOf(e.getValue()); bloomTypeMap.put(e.getKey(), bloomType); @@ -627,8 +623,7 @@ public class HFileOutputFormat2 static Map createFamilyBlockSizeMap(Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); - Map blockSizeMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue()); blockSizeMap.put(e.getKey(), blockSize); @@ -649,8 +644,7 @@ public class HFileOutputFormat2 Configuration conf) { Map stringMap = createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); - Map encoderMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); } @@ -667,7 +661,7 @@ public class HFileOutputFormat2 */ private static Map createFamilyConfValueMap( Configuration conf, String confName) { - Map confValMap = new TreeMap(Bytes.BYTES_COMPARATOR); + Map 
confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); String confVal = conf.get(confName, ""); for (String familyConf : confVal.split("&")) { String[] familySplit = familyConf.split("="); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java index 674cb57d06c..2834f868983 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java @@ -213,7 +213,7 @@ public class HashTable extends Configured implements Tool { * into the desired number of partitions. */ void selectPartitions(Pair regionStartEndKeys) { - List startKeys = new ArrayList(); + List startKeys = new ArrayList<>(); for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) { byte[] regionStartKey = regionStartEndKeys.getFirst()[i]; byte[] regionEndKey = regionStartEndKeys.getSecond()[i]; @@ -244,7 +244,7 @@ public class HashTable extends Configured implements Tool { } // choose a subset of start keys to group regions into ranges - partitions = new ArrayList(numHashFiles - 1); + partitions = new ArrayList<>(numHashFiles - 1); // skip the first start key as it is not a partition between ranges. for (long i = 1; i < numHashFiles; i++) { int splitIndex = (int) (numRegions * i / numHashFiles); @@ -269,7 +269,7 @@ public class HashTable extends Configured implements Tool { @SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf); ImmutableBytesWritable key = new ImmutableBytesWritable(); - partitions = new ArrayList(); + partitions = new ArrayList<>(); while (reader.next(key)) { partitions.add(new ImmutableBytesWritable(key.copyBytes())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index e2693b9d2a9..d1beb8d5690 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -471,7 +471,7 @@ public class Import extends Configured implements Tool { } private static ArrayList toQuotedByteArrays(String... 
stringArgs) {
-    ArrayList quotedArgs = new ArrayList();
+    ArrayList quotedArgs = new ArrayList<>();
     for (String stringArg : stringArgs) {
       // all the filters' instantiation methods expected quoted args since they are coming from
       // the shell, so add them here, though it shouldn't really be needed :-/
@@ -536,7 +536,7 @@ public class Import extends Configured implements Tool {
     String[] allMappings = allMappingsPropVal.split(",");
     for (String mapping: allMappings) {
       if(cfRenameMap == null) {
-        cfRenameMap = new TreeMap(Bytes.BYTES_COMPARATOR);
+        cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
       }
       String [] srcAndDest = mapping.split(":");
       if(srcAndDest.length != 2) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 39085df8237..a379d538946 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -249,7 +249,7 @@ public class ImportTsv extends Configured implements Tool {
     public ParsedLine parse(byte[] lineBytes, int length) throws BadTsvLineException {
       // Enumerate separator offsets
-      ArrayList tabOffsets = new ArrayList(maxColumnCount);
+      ArrayList tabOffsets = new ArrayList<>(maxColumnCount);
       for (int i = 0; i < length; i++) {
         if (lineBytes[i] == separatorByte) {
           tabOffsets.add(i);
@@ -448,7 +448,7 @@ public class ImportTsv extends Configured implements Tool {
             + " are less than row key position.");
         }
       }
-      return new Pair(startPos, endPos - startPos + 1);
+      return new Pair<>(startPos, endPos - startPos + 1);
     }
   }
@@ -521,7 +521,7 @@ public class ImportTsv extends Configured implements Tool {
       boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false);
       // if no.strict is false then check column family
       if(!noStrict) {
-        ArrayList unmatchedFamilies = new ArrayList();
+        ArrayList unmatchedFamilies = new ArrayList<>();
         Set cfSet = getColumnFamilies(columns);
         HTableDescriptor tDesc = table.getTableDescriptor();
         for (String cf : cfSet) {
@@ -530,7 +530,7 @@ public class ImportTsv extends Configured implements Tool {
         }
       }
       if(unmatchedFamilies.size() > 0) {
-        ArrayList familyNames = new ArrayList();
+        ArrayList familyNames = new ArrayList<>();
         for (HColumnDescriptor family : table.getTableDescriptor().getFamilies()) {
           familyNames.add(family.getNameAsString());
         }
@@ -626,7 +626,7 @@ public class ImportTsv extends Configured implements Tool {
   }
   private static Set getColumnFamilies(String[] columns) {
-    Set cfSet = new HashSet();
+    Set cfSet = new HashSet<>();
     for (String aColumn : columns) {
       if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn)
           || TsvParser.TIMESTAMPKEY_COLUMN_SPEC.equals(aColumn)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
index f6c7a903eaa..d37ab94f69e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
@@ -40,7 +40,7 @@ public class KeyValueSortReducer extends Reducer kvs,
       org.apache.hadoop.mapreduce.Reducer.Context context)
   throws java.io.IOException, InterruptedException {
-    TreeSet map = new TreeSet(CellComparator.COMPARATOR);
+    TreeSet map = new TreeSet<>(CellComparator.COMPARATOR);
     for (KeyValue kv: kvs) {
       try {
         map.add(kv.clone());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 963c4a1893c..718e88bc4e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -125,7 +125,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   private int maxFilesPerRegionPerFamily;
   private boolean assignSeqIds;
-  private Set unmatchedFamilies = new HashSet();
+  private Set unmatchedFamilies = new HashSet<>();
   // Source filesystem
   private FileSystem fs;
@@ -630,7 +630,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
     builder.setNameFormat("LoadIncrementalHFiles-%1$d");
     ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
-        new LinkedBlockingQueue(), builder.build());
+        new LinkedBlockingQueue<>(), builder.build());
     ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
     return pool;
   }
@@ -889,7 +889,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     // Add these back at the *front* of the queue, so there's a lower
     // chance that the region will just split again before we get there.
-    List lqis = new ArrayList(2);
+    List lqis = new ArrayList<>(2);
     lqis.add(new LoadQueueItem(item.family, botOut));
     lqis.add(new LoadQueueItem(item.family, topOut));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
index 7c1ebbc9bf4..dc2fc0d0e0b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
@@ -65,8 +65,7 @@ public class MultiHFileOutputFormat extends FileOutputFormat> tableWriters =
-        new HashMap>();
+    final Map> tableWriters = new HashMap<>();
     return new RecordWriter() {
       @Override
@@ -82,7 +81,7 @@ public class MultiHFileOutputFormat extends FileOutputFormat(context, tableOutputDir);
+          tableWriter = new HFileOutputFormat2.HFileRecordWriter<>(context, tableOutputDir);
           // Put table into map
           tableWriters.put(tableName, tableWriter);
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
index 48a982b2ad5..3099c0dd91e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
@@ -92,7 +92,7 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements
       throw new IllegalArgumentException("There must be at least 1 scan configuration set to : "
           + SCANS);
     }
-    List scans = new ArrayList();
+    List scans = new ArrayList<>();
     for (int i = 0; i < rawScans.length; i++) {
       try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
index 4931c3fd210..25ea047b7dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
@@ -163,7 +163,7 @@ public abstract class MultiTableInputFormatBase extends
       throw new IOException("No scans were provided.");
     }
-    Map> tableMaps = new HashMap>();
+    Map> tableMaps = new HashMap<>();
     for (Scan scan : scans) {
       byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME);
       if (tableNameBytes == null)
@@ -173,13 +173,13 @@ public abstract class MultiTableInputFormatBase extends
       List scanList = tableMaps.get(tableName);
       if (scanList == null) {
-        scanList = new ArrayList();
+        scanList = new ArrayList<>();
         tableMaps.put(tableName, scanList);
       }
       scanList.add(scan);
     }
-    List splits = new ArrayList();
+    List splits = new ArrayList<>();
     Iterator iter = tableMaps.entrySet().iterator();
     while (iter.hasNext()) {
       Map.Entry> entry = (Map.Entry>) iter.next();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index 6657c99ccf4..b48580dcfad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -79,10 +79,10 @@ public class PutSortReducer extends
         "putsortreducer.row.threshold", 1L * (1<<30));
     Iterator iter = puts.iterator();
     while (iter.hasNext()) {
-      TreeSet map = new TreeSet(CellComparator.COMPARATOR);
+      TreeSet map = new TreeSet<>(CellComparator.COMPARATOR);
       long curSize = 0;
       // stop at the end or the RAM threshold
-      List tags = new ArrayList();
+      List tags = new ArrayList<>();
       while (iter.hasNext() && curSize < threshold) {
         // clear the tags
         tags.clear();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
index 1561b3b4e03..98c92ea7148 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
@@ -92,7 +92,7 @@ public class ResultSerialization extends Configured implements Serialization kvs = new ArrayList();
+      List kvs = new ArrayList<>();
       int offset = 0;
       while (offset < totalBuffer) {
         int keyLength = Bytes.toInt(buf, offset);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 9ebb3c132e9..7962a427e0b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -137,8 +137,7 @@ extends InputFormat {
   /** The reverse DNS lookup cache mapping: IPAddress => HostName */
-  private HashMap reverseDNSCacheMap =
-      new HashMap();
+  private HashMap reverseDNSCacheMap = new HashMap<>();
   /**
    * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses
@@ -262,7 +261,7 @@ extends InputFormat {
     if (null == regLoc) {
       throw new IOException("Expecting at least one region.");
     }
-    List splits = new ArrayList(1);
+    List splits = new ArrayList<>(1);
     long regionSize = sizeCalculator.getRegionSize(regLoc.getRegionInfo().getRegionName());
     TableSplit split = new TableSplit(tableName, scan, HConstants.EMPTY_BYTE_ARRAY,
         HConstants.EMPTY_BYTE_ARRAY, regLoc
@@ -270,7 +269,7 @@ extends InputFormat {
     splits.add(split);
     return splits;
   }
-    List splits = new ArrayList(keys.getFirst().length);
+    List splits = new ArrayList<>(keys.getFirst().length);
     for (int i = 0; i < keys.getFirst().length; i++) {
       if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
         continue;
@@ -373,7 +372,7 @@ extends InputFormat {
    */
   private List calculateRebalancedSplits(List list, JobContext context,
       long average) throws IOException {
-    List resultList = new ArrayList();
+    List resultList = new ArrayList<>();
     Configuration conf = context.getConfiguration();
     //The default data skew ratio is 3
     long dataSkewRatio = conf.getLong(INPUT_AUTOBALANCE_MAXSKEWRATIO, 3);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 98f39da45f5..69b486d7b93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -451,7 +451,7 @@ public class TableMapReduceUtil {
     job.setMapperClass(mapper);
     Configuration conf = job.getConfiguration();
     HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
-    List scanStrings = new ArrayList();
+    List scanStrings = new ArrayList<>();
     for (Scan scan : scans) {
       scanStrings.add(convertScanToString(scan));
@@ -807,7 +807,7 @@ public class TableMapReduceUtil {
     if (conf == null) {
       throw new IllegalArgumentException("Must provide a configuration object.");
     }
-    Set paths = new HashSet(conf.getStringCollection("tmpjars"));
+    Set paths = new HashSet<>(conf.getStringCollection("tmpjars"));
     if (paths.isEmpty()) {
       throw new IllegalArgumentException("Configuration contains no tmpjars.");
     }
@@ -879,13 +879,13 @@ public class TableMapReduceUtil {
       Class... classes) throws IOException {
     FileSystem localFs = FileSystem.getLocal(conf);
-    Set jars = new HashSet();
+    Set jars = new HashSet<>();
     // Add jars that are already in the tmpjars variable
     jars.addAll(conf.getStringCollection("tmpjars"));
     // add jars as we find them to a map of contents jar name so that we can avoid
     // creating new jars for classes that have already been packaged.
-    Map packagedClasses = new HashMap();
+    Map packagedClasses = new HashMap<>();
     // Add jars containing the specified classes
     for (Class clazz : classes) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index c40396f140e..b2db319b80a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -183,7 +183,7 @@ public class TableSnapshotInputFormat extends InputFormat getSplits(JobContext job)
       throws IOException, InterruptedException {
-    List results = new ArrayList();
+    List results = new ArrayList<>();
     for (TableSnapshotInputFormatImpl.InputSplit split :
         TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) {
       results.add(new TableSnapshotRegionSplit(split));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index d52703a5912..69beef8930a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -311,7 +311,7 @@ public class TableSnapshotInputFormatImpl {
     Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());
-    List splits = new ArrayList();
+    List splits = new ArrayList<>();
     for (HRegionInfo hri : regionManifests) {
       // load region descriptor
@@ -346,7 +346,7 @@ public class TableSnapshotInputFormatImpl {
    */
   public static List getBestLocations(
       Configuration conf, HDFSBlocksDistribution blockDistribution) {
-    List locations = new ArrayList(3);
+    List locations = new ArrayList<>(3);
     HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 1e09f037a5a..05a48204b64 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -146,7 +146,7 @@ public class TextSortReducer extends
         "reducer.row.threshold", 1L * (1<<30));
     Iterator iter = lines.iterator();
     while (iter.hasNext()) {
-      Set kvs = new TreeSet(CellComparator.COMPARATOR);
+      Set kvs = new TreeSet<>(CellComparator.COMPARATOR);
       long curSize = 0;
       // stop at the end or the RAM threshold
       while (iter.hasNext() && curSize < threshold) {
@@ -160,7 +160,7 @@ public class TextSortReducer extends
           ttl = parsed.getCellTTL();
           // create tags for the parsed line
-          List tags = new ArrayList();
+          List tags = new ArrayList<>();
           if (cellVisibilityExpr != null) {
             tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags(
                 cellVisibilityExpr));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index 94bcb43061d..08b5aab6783 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -113,7 +113,7 @@ extends Mapper
       throw new RuntimeException("No
row key column specified"); } this.kvCreator = new CellCreator(conf); - tags = new ArrayList(); + tags = new ArrayList<>(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 02fcbba122c..8514ace0a73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -239,7 +239,7 @@ public class WALInputFormat extends InputFormat { FileSystem fs = inputDir.getFileSystem(conf); List files = getFiles(fs, inputDir, startTime, endTime); - List splits = new ArrayList(files.size()); + List splits = new ArrayList<>(files.size()); for (FileStatus file : files) { splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime)); } @@ -248,7 +248,7 @@ public class WALInputFormat extends InputFormat { private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { - List result = new ArrayList(); + List result = new ArrayList<>(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); FileStatus[] files = fs.listStatus(dir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 2c67baff2ca..cca2041b8b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -132,7 +132,7 @@ public class WALPlayer extends Configured implements Tool { */ protected static class WALMapper extends Mapper { - private Map tables = new TreeMap(); + private Map tables = new TreeMap<>(); @Override public void map(WALKey key, WALEdit value, Context context) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 60ad545a79b..69ebd97e6ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -115,7 +115,7 @@ public class AssignmentManager { private AtomicInteger numRegionsOpened = new AtomicInteger(0); - final private KeyLocker locker = new KeyLocker(); + final private KeyLocker locker = new KeyLocker<>(); Set replicasToClose = Collections.synchronizedSet(new HashSet()); @@ -141,8 +141,7 @@ public class AssignmentManager { // TODO: When do plans get cleaned out? Ever? In server open and in server // shutdown processing -- St.Ack // All access to this Map must be synchronized. - final NavigableMap regionPlans = - new TreeMap(); + final NavigableMap regionPlans = new TreeMap<>(); private final TableStateManager tableStateManager; @@ -183,8 +182,7 @@ public class AssignmentManager { * because we don't expect this to happen frequently; we don't * want to copy this information over during each state transition either. */ - private final ConcurrentHashMap - failedOpenTracker = new ConcurrentHashMap(); + private final ConcurrentHashMap failedOpenTracker = new ConcurrentHashMap<>(); // In case not using ZK for region assignment, region states // are persisted in meta with a state store @@ -197,7 +195,7 @@ public class AssignmentManager { public static boolean TEST_SKIP_SPLIT_HANDLING = false; /** Listeners that are called on assignment events. 
*/ - private List listeners = new CopyOnWriteArrayList(); + private List listeners = new CopyOnWriteArrayList<>(); private RegionStateListener regionStateListener; @@ -382,7 +380,7 @@ public class AssignmentManager { pending++; } } - return new Pair(pending, hris.size()); + return new Pair<>(pending, hris.size()); } /** @@ -748,16 +746,16 @@ public class AssignmentManager { return true; } LOG.info("Assigning " + regionCount + " region(s) to " + destination.toString()); - Set encodedNames = new HashSet(regionCount); + Set encodedNames = new HashSet<>(regionCount); for (HRegionInfo region : regions) { encodedNames.add(region.getEncodedName()); } - List failedToOpenRegions = new ArrayList(); + List failedToOpenRegions = new ArrayList<>(); Map locks = locker.acquireLocks(encodedNames); try { - Map plans = new HashMap(regionCount); - List states = new ArrayList(regionCount); + Map plans = new HashMap<>(regionCount); + List states = new ArrayList<>(regionCount); for (HRegionInfo region : regions) { String encodedName = region.getEncodedName(); if (!isDisabledorDisablingRegionInRIT(region)) { @@ -797,8 +795,7 @@ public class AssignmentManager { // that unnecessary timeout on RIT is reduced. this.addPlans(plans); - List>> regionOpenInfos = - new ArrayList>>(states.size()); + List>> regionOpenInfos = new ArrayList<>(states.size()); for (RegionState state: states) { HRegionInfo region = state.getRegion(); regionStates.updateRegionState( @@ -807,8 +804,7 @@ public class AssignmentManager { if (shouldAssignFavoredNodes(region)) { favoredNodes = server.getFavoredNodesManager().getFavoredNodesWithDNPort(region); } - regionOpenInfos.add(new Pair>( - region, favoredNodes)); + regionOpenInfos.add(new Pair<>(region, favoredNodes)); } // Move on to open regions. @@ -908,7 +904,7 @@ public class AssignmentManager { } // wait for assignment completion - ArrayList userRegionSet = new ArrayList(regions.size()); + ArrayList userRegionSet = new ArrayList<>(regions.size()); for (HRegionInfo region: regions) { if (!region.getTable().isSystemTable()) { userRegionSet.add(region); @@ -1443,7 +1439,7 @@ public class AssignmentManager { */ public boolean waitForAssignment(HRegionInfo regionInfo) throws InterruptedException { - ArrayList regionSet = new ArrayList(1); + ArrayList regionSet = new ArrayList<>(1); regionSet.add(regionInfo); return waitForAssignment(regionSet, true, Long.MAX_VALUE); } @@ -1588,7 +1584,7 @@ public class AssignmentManager { } // invoke assignment (async) - ArrayList userRegionSet = new ArrayList(regions); + ArrayList userRegionSet = new ArrayList<>(regions); for (Map.Entry> plan: bulkPlan.entrySet()) { if (!assign(plan.getKey(), plan.getValue()) && !server.isStopped()) { for (HRegionInfo region: plan.getValue()) { @@ -1640,7 +1636,7 @@ public class AssignmentManager { if (retainAssignment) { assign(allRegions); } else { - List regions = new ArrayList(regionsFromMetaScan); + List regions = new ArrayList<>(regionsFromMetaScan); assign(regions); } @@ -1687,7 +1683,7 @@ public class AssignmentManager { */ public static List replicaRegionsNotRecordedInMeta( Set regionsRecordedInMeta, MasterServices master)throws IOException { - List regionsNotRecordedInMeta = new ArrayList(); + List regionsNotRecordedInMeta = new ArrayList<>(); for (HRegionInfo hri : regionsRecordedInMeta) { TableName table = hri.getTable(); if(master.getTableDescriptors().get(table) == null) @@ -1723,7 +1719,7 @@ public class AssignmentManager { // Get any new but slow to checkin region server that joined the cluster Set 
onlineServers = serverManager.getOnlineServers().keySet(); // Set of offline servers to be returned - Set offlineServers = new HashSet(); + Set offlineServers = new HashSet<>(); // Iterate regions in META for (Result result : results) { if (result == null && LOG.isDebugEnabled()){ @@ -2446,7 +2442,7 @@ public class AssignmentManager { threadPoolExecutorService.submit(splitReplicasCallable); // wait for assignment completion - ArrayList regionAssignSet = new ArrayList(2); + ArrayList regionAssignSet = new ArrayList<>(2); regionAssignSet.add(daughterAHRI); regionAssignSet.add(daughterBHRI); while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), @@ -2558,7 +2554,7 @@ public class AssignmentManager { final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - Set encodedNames = new HashSet(2); + Set encodedNames = new HashSet<>(2); encodedNames.add(a.getEncodedName()); encodedNames.add(b.getEncodedName()); Map locks = locker.acquireLocks(encodedNames); @@ -2645,7 +2641,7 @@ public class AssignmentManager { threadPoolExecutorService.submit(mergeReplicasCallable); // wait for assignment completion - ArrayList regionAssignSet = new ArrayList(1); + ArrayList regionAssignSet = new ArrayList<>(1); regionAssignSet.add(mergedRegion); while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), Long.MAX_VALUE)) { LOG.debug("The merged region " + mergedRegion + " is still in transition. "); @@ -2754,7 +2750,7 @@ public class AssignmentManager { final HRegionInfo hri_b) { // Close replicas for the original unmerged regions. create/assign new replicas // for the merged parent. - List unmergedRegions = new ArrayList(); + List unmergedRegions = new ArrayList<>(); unmergedRegions.add(hri_a); unmergedRegions.add(hri_b); Map> map = regionStates.getRegionAssignments(unmergedRegions); @@ -2768,7 +2764,7 @@ public class AssignmentManager { } } int numReplicas = getNumReplicas(server, mergedHri.getTable()); - List regions = new ArrayList(); + List regions = new ArrayList<>(); for (int i = 1; i < numReplicas; i++) { regions.add(RegionReplicaUtil.getRegionInfoForReplica(mergedHri, i)); } @@ -2790,7 +2786,7 @@ public class AssignmentManager { // the replica1s of daughters will be on the same machine int numReplicas = getNumReplicas(server, parentHri.getTable()); // unassign the old replicas - List parentRegion = new ArrayList(); + List parentRegion = new ArrayList<>(); parentRegion.add(parentHri); Map> currentAssign = regionStates.getRegionAssignments(parentRegion); @@ -2804,7 +2800,7 @@ public class AssignmentManager { } } // assign daughter replicas - Map map = new HashMap(); + Map map = new HashMap<>(); for (int i = 1; i < numReplicas; i++) { prepareDaughterReplicaForAssignment(hri_a, parentHri, i, map); prepareDaughterReplicaForAssignment(hri_b, parentHri, i, map); @@ -2856,7 +2852,7 @@ public class AssignmentManager { sendRegionClosedNotification(regionInfo); // also note that all the replicas of the primary should be closed if (state != null && state.equals(State.SPLIT)) { - Collection c = new ArrayList(1); + Collection c = new ArrayList<>(1); c.add(regionInfo); Map> map = regionStates.getRegionAssignments(c); Collection> allReplicas = map.values(); @@ -2865,7 +2861,7 @@ public class AssignmentManager { } } else if (state != null && state.equals(State.MERGED)) { - Collection c = new ArrayList(1); + Collection c = new ArrayList<>(1); c.add(regionInfo); Map> map = 
regionStates.getRegionAssignments(c); Collection> allReplicas = map.values(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java index d290f26b79a..e1922af560e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java @@ -51,16 +51,13 @@ public class AssignmentVerificationReport { private int totalRegions = 0; private int totalRegionServers = 0; // for unassigned regions - private List unAssignedRegionsList = - new ArrayList(); + private List unAssignedRegionsList = new ArrayList<>(); // For regions without valid favored nodes - private List regionsWithoutValidFavoredNodes = - new ArrayList(); + private List regionsWithoutValidFavoredNodes = new ArrayList<>(); // For regions not running on the favored nodes - private List nonFavoredAssignedRegionList = - new ArrayList(); + private List nonFavoredAssignedRegionList = new ArrayList<>(); // For regions running on the favored nodes private int totalFavoredAssignments = 0; @@ -73,26 +70,20 @@ public class AssignmentVerificationReport { private float avgRegionsOnRS = 0; private int maxRegionsOnRS = 0; private int minRegionsOnRS = Integer.MAX_VALUE; - private Set mostLoadedRSSet = - new HashSet(); - private Set leastLoadedRSSet = - new HashSet(); + private Set mostLoadedRSSet = new HashSet<>(); + private Set leastLoadedRSSet = new HashSet<>(); private float avgDispersionScore = 0; private float maxDispersionScore = 0; - private Set maxDispersionScoreServerSet = - new HashSet(); + private Set maxDispersionScoreServerSet = new HashSet<>(); private float minDispersionScore = Float.MAX_VALUE; - private Set minDispersionScoreServerSet = - new HashSet(); + private Set minDispersionScoreServerSet = new HashSet<>(); private float avgDispersionNum = 0; private float maxDispersionNum = 0; - private Set maxDispersionNumServerSet = - new HashSet(); + private Set maxDispersionNumServerSet = new HashSet<>(); private float minDispersionNum = Float.MAX_VALUE; - private Set minDispersionNumServerSet = - new HashSet(); + private Set minDispersionNumServerSet = new HashSet<>(); public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, Map> regionLocalityMap) { @@ -111,13 +102,10 @@ public class AssignmentVerificationReport { Map currentAssignment = snapshot.getRegionToRegionServerMap(); // Initialize the server to its hosing region counter map - Map serverToHostingRegionCounterMap = - new HashMap(); + Map serverToHostingRegionCounterMap = new HashMap<>(); - Map primaryRSToRegionCounterMap = - new HashMap(); - Map> primaryToSecTerRSMap = - new HashMap>(); + Map primaryRSToRegionCounterMap = new HashMap<>(); + Map> primaryToSecTerRSMap = new HashMap<>(); // Check the favored nodes and its locality information // Also keep tracker of the most loaded and least loaded region servers @@ -164,7 +152,7 @@ public class AssignmentVerificationReport { // Update the primary rs to secondary and tertiary rs map Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS); if (secAndTerSet == null) { - secAndTerSet = new HashSet(); + secAndTerSet = new HashSet<>(); } secAndTerSet.add(secondaryRS); secAndTerSet.add(tertiaryRS); @@ -340,10 +328,8 @@ public class AssignmentVerificationReport { plan = newPlan; } // Get the region to region server mapping - Map 
primaryRSToRegionCounterMap = - new HashMap(); - Map> primaryToSecTerRSMap = - new HashMap>(); + Map primaryRSToRegionCounterMap = new HashMap<>(); + Map> primaryToSecTerRSMap = new HashMap<>(); // Check the favored nodes and its locality information // Also keep tracker of the most loaded and least loaded region servers @@ -375,7 +361,7 @@ public class AssignmentVerificationReport { // Update the primary rs to secondary and tertiary rs map Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS); if (secAndTerSet == null) { - secAndTerSet = new HashSet(); + secAndTerSet = new HashSet<>(); } secAndTerSet.add(secondaryRS); secAndTerSet.add(tertiaryRS); @@ -451,7 +437,7 @@ public class AssignmentVerificationReport { * */ public List getDispersionInformation() { - List dispersion = new ArrayList(); + List dispersion = new ArrayList<>(); dispersion.add(avgDispersionScore); dispersion.add(maxDispersionScore); dispersion.add(minDispersionScore); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java index 606dce42f73..d8c511ecb27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java @@ -59,7 +59,7 @@ public class BulkReOpen extends BulkAssigner { .entrySet()) { final List hris = e.getValue(); // add plans for the regions that need to be reopened - Map plans = new HashMap(); + Map plans = new HashMap<>(); for (HRegionInfo hri : hris) { RegionPlan reOpenPlan = assignmentManager.getRegionReopenPlan(hri); plans.put(hri.getEncodedName(), reOpenPlan); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index ef042af5c6c..affd44c8b72 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -151,9 +151,8 @@ public class CatalogJanitor extends ScheduledChore { final AtomicInteger count = new AtomicInteger(0); // Keep Map of found split parents. There are candidates for cleanup. // Use a comparator that has split parents come before its daughters. - final Map splitParents = - new TreeMap(new SplitParentFirstComparator()); - final Map mergedRegions = new TreeMap(); + final Map splitParents = new TreeMap<>(new SplitParentFirstComparator()); + final Map mergedRegions = new TreeMap<>(); // This visitor collects split parents and counts rows in the hbase:meta table MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @@ -181,8 +180,7 @@ public class CatalogJanitor extends ScheduledChore { // the start row MetaTableAccessor.scanMetaForTableRegions(this.connection, visitor, tableName); - return new Triple, Map>( - count.get(), mergedRegions, splitParents); + return new Triple<>(count.get(), mergedRegions, splitParents); } /** @@ -275,7 +273,7 @@ public class CatalogJanitor extends ScheduledChore { // Now work on our list of found parents. See if any we can clean up. 
int splitCleaned = 0; // regions whose parents are still around - HashSet parentNotCleaned = new HashSet(); + HashSet parentNotCleaned = new HashSet<>(); for (Map.Entry e : splitParents.entrySet()) { if (this.services.isInMaintenanceMode()) { // Stop cleaning if the master is in maintenance mode @@ -398,7 +396,7 @@ public class CatalogJanitor extends ScheduledChore { Pair checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter) throws IOException { if (daughter == null) { - return new Pair(Boolean.FALSE, Boolean.FALSE); + return new Pair<>(Boolean.FALSE, Boolean.FALSE); } FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); @@ -411,12 +409,12 @@ public class CatalogJanitor extends ScheduledChore { try { if (!FSUtils.isExists(fs, daughterRegionDir)) { - return new Pair(Boolean.FALSE, Boolean.FALSE); + return new Pair<>(Boolean.FALSE, Boolean.FALSE); } } catch (IOException ioe) { LOG.error("Error trying to determine if daughter region exists, " + "assuming exists and has references", ioe); - return new Pair(Boolean.TRUE, Boolean.TRUE); + return new Pair<>(Boolean.TRUE, Boolean.TRUE); } boolean references = false; @@ -433,9 +431,9 @@ public class CatalogJanitor extends ScheduledChore { } catch (IOException e) { LOG.error("Error trying to determine referenced files from : " + daughter.getEncodedName() + ", to: " + parent.getEncodedName() + " assuming has references", e); - return new Pair(Boolean.TRUE, Boolean.TRUE); + return new Pair<>(Boolean.TRUE, Boolean.TRUE); } - return new Pair(Boolean.TRUE, Boolean.valueOf(references)); + return new Pair<>(Boolean.TRUE, Boolean.valueOf(references)); } private HTableDescriptor getTableDescriptor(final TableName tableName) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java index 52af89e1767..bf3ae7e9bd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java @@ -122,7 +122,7 @@ class ClusterSchemaServiceImpl implements ClusterSchemaService { checkIsRunning(); Set set = getTableNamespaceManager().list(); if (set == null || set.isEmpty()) return EMPTY_NAMESPACE_LIST; - List list = new ArrayList(set.size()); + List list = new ArrayList<>(set.size()); list.addAll(set); return Collections.unmodifiableList(list); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index 3b19ada971b..ea5516dfb27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -97,8 +97,7 @@ public class ClusterStatusPublisher extends ScheduledChore { private long lastMessageTime = 0; private final HMaster master; private final int messagePeriod; // time between two message - private final ConcurrentMap lastSent = - new ConcurrentHashMap(); + private final ConcurrentMap lastSent = new ConcurrentHashMap<>(); private Publisher publisher; private boolean connected = false; @@ -194,7 +193,7 @@ public class ClusterStatusPublisher extends ScheduledChore { } // We're sending the new deads first. 
- List> entries = new ArrayList>(); + List> entries = new ArrayList<>(); entries.addAll(lastSent.entrySet()); Collections.sort(entries, new Comparator>() { @Override @@ -205,7 +204,7 @@ public class ClusterStatusPublisher extends ScheduledChore { // With a limit of MAX_SERVER_PER_MESSAGE int max = entries.size() > MAX_SERVER_PER_MESSAGE ? MAX_SERVER_PER_MESSAGE : entries.size(); - List res = new ArrayList(max); + List res = new ArrayList<>(max); for (int i = 0; i < max; i++) { Map.Entry toSend = entries.get(i); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index c33cdcc951f..faceba22853 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -51,7 +51,7 @@ public class DeadServer { * and it's server logs are recovered, it will be told to call server startup * because by then, its regions have probably been reassigned. */ - private final Map deadServers = new HashMap(); + private final Map deadServers = new HashMap<>(); /** * Number of dead servers currently being processed @@ -102,7 +102,7 @@ public class DeadServer { public synchronized boolean areDeadServersInProgress() { return processing; } public synchronized Set copyServerNames() { - Set clone = new HashSet(deadServers.size()); + Set clone = new HashSet<>(deadServers.size()); clone.addAll(deadServers.keySet()); return clone; } @@ -177,11 +177,11 @@ public class DeadServer { * @return a sorted array list, by death time, lowest values first. */ public synchronized List> copyDeadServersSince(long ts){ - List> res = new ArrayList>(size()); + List> res = new ArrayList<>(size()); for (Map.Entry entry:deadServers.entrySet()){ if (entry.getValue() >= ts){ - res.add(new Pair(entry.getKey(), entry.getValue())); + res.add(new Pair<>(entry.getKey(), entry.getValue())); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java index 43ea52345cb..fc3607f7165 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java @@ -43,8 +43,7 @@ import org.apache.hadoop.hbase.ServerName; public class GeneralBulkAssigner extends BulkAssigner { private static final Log LOG = LogFactory.getLog(GeneralBulkAssigner.class); - private Map> failedPlans - = new ConcurrentHashMap>(); + private Map> failedPlans = new ConcurrentHashMap<>(); private ExecutorService pool; final Map> bulkPlan; @@ -82,7 +81,7 @@ public class GeneralBulkAssigner extends BulkAssigner { @Override protected boolean waitUntilDone(final long timeout) throws InterruptedException { - Set regionSet = new HashSet(); + Set regionSet = new HashSet<>(); for (List regionList : bulkPlan.values()) { regionSet.addAll(regionList); } @@ -164,7 +163,7 @@ public class GeneralBulkAssigner extends BulkAssigner { } private int reassignFailedPlans() { - List reassigningRegions = new ArrayList(); + List reassigningRegions = new ArrayList<>(); for (Map.Entry> e : failedPlans.entrySet()) { LOG.info("Failed assigning " + e.getValue().size() + " regions to server " + e.getKey() + ", reassigning them"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 78f17835fce..501d3bddf99 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1063,7 +1063,7 @@ public class HMaster extends HRegionServer implements MasterServices { //start the hfile archive cleaner thread Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Map params = new HashMap(); + Map params = new HashMap<>(); params.put(MASTER, this); this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem() .getFileSystem(), archiveDir, params); @@ -1327,7 +1327,7 @@ public class HMaster extends HRegionServer implements MasterServices { Map>> assignmentsByTable = this.assignmentManager.getRegionStates().getAssignmentsByTable(); - List plans = new ArrayList(); + List plans = new ArrayList<>(); //Give the balancer the current cluster state. this.balancer.setClusterStatus(getClusterStatus()); @@ -2235,8 +2235,7 @@ public class HMaster extends HRegionServer implements MasterServices { Pair getTableRegionForRow( final TableName tableName, final byte [] rowKey) throws IOException { - final AtomicReference> result = - new AtomicReference>(null); + final AtomicReference> result = new AtomicReference<>(null); MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @Override @@ -2345,7 +2344,7 @@ public class HMaster extends HRegionServer implements MasterServices { List backupMasters = null; if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) { - backupMasters = new ArrayList(backupMasterStrings.size()); + backupMasters = new ArrayList<>(backupMasterStrings.size()); for (String s: backupMasterStrings) { try { byte [] bytes; @@ -2852,7 +2851,7 @@ public class HMaster extends HRegionServer implements MasterServices { */ List getNamespaces() throws IOException { checkInitialized(); - final List nsds = new ArrayList(); + final List nsds = new ArrayList<>(); boolean bypass = false; if (cpHost != null) { bypass = cpHost.preListNamespaceDescriptors(nsds); @@ -2918,7 +2917,7 @@ public class HMaster extends HRegionServer implements MasterServices { public List listTableDescriptors(final String namespace, final String regex, final List tableNameList, final boolean includeSysTables) throws IOException { - List htds = new ArrayList(); + List htds = new ArrayList<>(); boolean bypass = cpHost != null? cpHost.preGetTableDescriptors(tableNameList, htds, regex): false; if (!bypass) { @@ -2939,13 +2938,13 @@ public class HMaster extends HRegionServer implements MasterServices { */ public List listTableNames(final String namespace, final String regex, final boolean includeSysTables) throws IOException { - List htds = new ArrayList(); + List htds = new ArrayList<>(); boolean bypass = cpHost != null? 
cpHost.preGetTableNames(htds, regex): false; if (!bypass) { htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables); if (cpHost != null) cpHost.postGetTableNames(htds, regex); } - List result = new ArrayList(htds.size()); + List result = new ArrayList<>(htds.size()); for (HTableDescriptor htd: htds) result.add(htd.getTableName()); return result; } @@ -3262,7 +3261,7 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public List listDrainingRegionServers() { String parentZnode = getZooKeeper().znodePaths.drainingZNode; - List serverNames = new ArrayList(); + List serverNames = new ArrayList<>(); List serverStrs = null; try { serverStrs = ZKUtil.listChildrenNoWatch(getZooKeeper(), parentZnode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java index 5e1917bf2be..1988e2d3ac1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java @@ -100,7 +100,7 @@ public class MasterMetaBootstrap { throws IOException, InterruptedException, KeeperException { int numReplicas = master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); - final Set EMPTY_SET = new HashSet(); + final Set EMPTY_SET = new HashSet<>(); for (int i = 1; i < numReplicas; i++) { assignMeta(EMPTY_SET, i); } @@ -241,7 +241,7 @@ public class MasterMetaBootstrap { */ private Set getPreviouselyFailedMetaServersFromZK() throws KeeperException { final ZooKeeperWatcher zooKeeper = master.getZooKeeper(); - Set result = new HashSet(); + Set result = new HashSet<>(); String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.znodePaths.recoveringRegionsZNode, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); List regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java index fc0ecfbd4f9..2b1232a400f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java @@ -56,7 +56,7 @@ public class MasterMobCompactionThread { final String n = Thread.currentThread().getName(); // this pool is used to run the mob compaction this.masterMobPool = new ThreadPoolExecutor(1, 2, 60, TimeUnit.SECONDS, - new SynchronousQueue(), new ThreadFactory() { + new SynchronousQueue<>(), new ThreadFactory() { @Override public Thread newThread(Runnable r) { String name = n + "-MasterMobCompaction-" + EnvironmentEdgeManager.currentTime(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 3beda05fcbe..177ee3251d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -234,7 +234,7 @@ public class MasterRpcServices extends RSRpcServices * @return list of blocking services and their security info classes that this server supports */ protected List getServices() { - List bssi = new ArrayList(5); + List bssi = new ArrayList<>(5); 
bssi.add(new BlockingServiceAndInterface( MasterService.newReflectiveBlockingService(this), MasterService.BlockingInterface.class)); @@ -1333,7 +1333,7 @@ public class MasterRpcServices extends RSRpcServices Pair pair = MetaTableAccessor.getRegion(master.getConnection(), regionName); if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),regionName)) { - pair = new Pair(HRegionInfo.FIRST_META_REGIONINFO, + pair = new Pair<>(HRegionInfo.FIRST_META_REGIONINFO, master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper())); } if (pair == null) { @@ -1491,7 +1491,7 @@ public class MasterRpcServices extends RSRpcServices throw new DoNotRetryIOException("Table " + tableName + " is not enabled"); } boolean allFiles = false; - List compactedColumns = new ArrayList(); + List compactedColumns = new ArrayList<>(); HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies(); byte[] family = null; if (request.hasFamily()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 1f9729c2b6d..27aca94d338 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -155,7 +155,7 @@ public class MasterWalManager { boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors", WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT); - Set serverNames = new HashSet(); + Set serverNames = new HashSet<>(); Path logsDirPath = new Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME); do { @@ -218,7 +218,7 @@ public class MasterWalManager { } public void splitLog(final ServerName serverName) throws IOException { - Set serverNames = new HashSet(); + Set serverNames = new HashSet<>(); serverNames.add(serverName); splitLog(serverNames); } @@ -228,7 +228,7 @@ public class MasterWalManager { * @param serverName logs belonging to this server will be split */ public void splitMetaLog(final ServerName serverName) throws IOException { - Set serverNames = new HashSet(); + Set serverNames = new HashSet<>(); serverNames.add(serverName); splitMetaLog(serverNames); } @@ -245,7 +245,7 @@ public class MasterWalManager { "We only release this lock when we set it. 
Updates to code that uses it should verify use " + "of the guard boolean.") private List getLogDirs(final Set serverNames) throws IOException { - List logDirs = new ArrayList(); + List logDirs = new ArrayList<>(); boolean needReleaseLock = false; if (!this.services.isInitialized()) { // during master initialization, we could have multiple places splitting a same wal diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java index 2b1fb5069e5..5c06857ca6d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RackManager.java @@ -80,7 +80,7 @@ public class RackManager { public List getRack(List servers) { // just a note - switchMapping caches results (at least the implementation should unless the // resolution is really a lightweight process) - List serversAsString = new ArrayList(servers.size()); + List serversAsString = new ArrayList<>(servers.size()); for (ServerName server : servers) { serversAsString.add(server.getHostname()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java index 7acf9df7a23..ffc3e5b51f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java @@ -106,7 +106,7 @@ public class RegionPlacementMaintainer { this.conf = conf; this.enforceLocality = enforceLocality; this.enforceMinAssignmentMove = enforceMinAssignmentMove; - this.targetTableSet = new HashSet(); + this.targetTableSet = new HashSet<>(); this.rackManager = new RackManager(conf); try { this.connection = ConnectionFactory.createConnection(this.conf); @@ -163,7 +163,7 @@ public class RegionPlacementMaintainer { if (this.enforceLocality == true) { regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf); } - List reports = new ArrayList(); + List reports = new ArrayList<>(); // Iterate all the tables to fill up the verification report for (TableName table : tables) { if (!this.targetTableSet.isEmpty() && @@ -204,7 +204,7 @@ public class RegionPlacementMaintainer { assignmentSnapshot.getRegionToRegionServerMap(); // Get the all the region servers - List servers = new ArrayList(); + List servers = new ArrayList<>(); try (Admin admin = this.connection.getAdmin()) { servers.addAll(admin.getClusterStatus().getServers()); } @@ -255,15 +255,14 @@ public class RegionPlacementMaintainer { // Compute the total rack locality for each region in each rack. The total // rack locality is the sum of the localities of a region on all servers in // a rack. 
- Map> rackRegionLocality = - new HashMap>(); + Map> rackRegionLocality = new HashMap<>(); for (int i = 0; i < numRegions; i++) { HRegionInfo region = regions.get(i); for (int j = 0; j < regionSlots; j += slotsPerServer) { String rack = rackManager.getRack(servers.get(j / slotsPerServer)); Map rackLocality = rackRegionLocality.get(rack); if (rackLocality == null) { - rackLocality = new HashMap(); + rackLocality = new HashMap<>(); rackRegionLocality.put(rack, rackLocality); } Float localityObj = rackLocality.get(region); @@ -395,8 +394,7 @@ public class RegionPlacementMaintainer { tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment); for (int i = 0; i < numRegions; i++) { - List favoredServers = - new ArrayList(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + List favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); ServerName s = servers.get(primaryAssignment[i] / slotsPerServer); favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE)); @@ -417,7 +415,7 @@ public class RegionPlacementMaintainer { LOG.info("Assignment plan for secondary and tertiary generated " + "using MunkresAssignment"); } else { - Map primaryRSMap = new HashMap(); + Map primaryRSMap = new HashMap<>(); for (int i = 0; i < numRegions; i++) { primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer)); } @@ -427,8 +425,7 @@ public class RegionPlacementMaintainer { Map secondaryAndTertiaryMap = favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap); for (int i = 0; i < numRegions; i++) { - List favoredServers = - new ArrayList(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); + List favoredServers = new ArrayList<>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM); HRegionInfo currentRegion = regions.get(i); ServerName s = primaryRSMap.get(currentRegion); favoredServers.add(ServerName.valueOf(s.getHostname(), s.getPort(), @@ -614,8 +611,7 @@ public class RegionPlacementMaintainer { if (plan == null) return; LOG.info("========== Start to print the assignment plan ================"); // sort the map based on region info - Map> assignmentMap = - new TreeMap>(plan.getAssignmentMap()); + Map> assignmentMap = new TreeMap<>(plan.getAssignmentMap()); for (Map.Entry> entry : assignmentMap.entrySet()) { @@ -666,13 +662,11 @@ public class RegionPlacementMaintainer { // track of the failed and succeeded updates int succeededNum = 0; - Map failedUpdateMap = - new HashMap(); + Map failedUpdateMap = new HashMap<>(); for (Map.Entry> entry : currentAssignment.entrySet()) { - List>> regionUpdateInfos = - new ArrayList>>(); + List>> regionUpdateInfos = new ArrayList<>(); try { // Keep track of the favored updates for the current region server FavoredNodesPlan singleServerPlan = null; @@ -687,8 +681,7 @@ public class RegionPlacementMaintainer { } // Update the single server update singleServerPlan.updateFavoredNodesMap(region, favoredServerList); - regionUpdateInfos.add( - new Pair>(region, favoredServerList)); + regionUpdateInfos.add(new Pair<>(region, favoredServerList)); } } if (singleServerPlan != null) { @@ -749,7 +742,7 @@ public class RegionPlacementMaintainer { */ public Map getRegionsMovement(FavoredNodesPlan newPlan) throws IOException { - Map movesPerTable = new HashMap(); + Map movesPerTable = new HashMap<>(); SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); Map> tableToRegions = snapshot .getTableToRegionMap(); @@ -944,7 +937,7 @@ public class RegionPlacementMaintainer { if 
(favoredNodesArray == null) return null; - List serverList = new ArrayList(); + List serverList = new ArrayList<>(); for (String hostNameAndPort : favoredNodesArray) { serverList.add(ServerName.valueOf(hostNameAndPort, ServerName.NON_STARTCODE)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 4125eeaadd6..a1e24f2fb4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -80,41 +80,35 @@ public class RegionStates { /** * Regions currently in transition. */ - final HashMap regionsInTransition = - new HashMap(); + final HashMap regionsInTransition = new HashMap<>(); /** * Region encoded name to state map. * All the regions should be in this map. */ - private final Map regionStates = - new HashMap(); + private final Map regionStates = new HashMap<>(); /** * Holds mapping of table -> region state */ - private final Map> regionStatesTableIndex = - new HashMap>(); + private final Map> regionStatesTableIndex = new HashMap<>(); /** * Server to regions assignment map. * Contains the set of regions currently assigned to a given server. */ - private final Map> serverHoldings = - new HashMap>(); + private final Map> serverHoldings = new HashMap<>(); /** * Maintains the mapping from the default region to the replica regions. */ - private final Map> defaultReplicaToOtherReplicas = - new HashMap>(); + private final Map> defaultReplicaToOtherReplicas = new HashMap<>(); /** * Region to server assignment map. * Contains the server a given region is currently assigned to. */ - private final TreeMap regionAssignments = - new TreeMap(); + private final TreeMap regionAssignments = new TreeMap<>(); /** * Encoded region name to server assignment map for re-assignment @@ -126,8 +120,7 @@ public class RegionStates { * is offline while the info in lastAssignments is cleared when * the region is closed or the server is dead and processed. */ - private final HashMap lastAssignments = - new HashMap(); + private final HashMap lastAssignments = new HashMap<>(); /** * Encoded region name to server assignment map for the @@ -138,16 +131,14 @@ public class RegionStates { * to match the meta. We need this map to find out the old server * whose serverHoldings needs cleanup, given a moved region. */ - private final HashMap oldAssignments = - new HashMap(); + private final HashMap oldAssignments = new HashMap<>(); /** * Map a host port pair string to the latest start code * of a region server which is known to be dead. It is dead * to us, but server manager may not know it yet. */ - private final HashMap deadServers = - new HashMap(); + private final HashMap deadServers = new HashMap<>(); /** * Map a dead servers to the time when log split is done. @@ -156,8 +147,7 @@ public class RegionStates { * on a configured time. By default, we assume a dead * server should be done with log splitting in two hours. 
*/ - private final HashMap processedServers = - new HashMap(); + private final HashMap processedServers = new HashMap<>(); private long lastProcessedServerCleanTime; private final TableStateManager tableStateManager; @@ -181,7 +171,7 @@ public class RegionStates { * @return a copy of the region assignment map */ public synchronized Map getRegionAssignments() { - return new TreeMap(regionAssignments); + return new TreeMap<>(regionAssignments); } /** @@ -191,7 +181,7 @@ public class RegionStates { */ synchronized Map> getRegionAssignments( Collection regions) { - Map> map = new HashMap>(); + Map> map = new HashMap<>(); for (HRegionInfo region : regions) { HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(region); Set allReplicas = defaultReplicaToOtherReplicas.get(defaultReplica); @@ -201,7 +191,7 @@ public class RegionStates { if (server != null) { List regionsOnServer = map.get(server); if (regionsOnServer == null) { - regionsOnServer = new ArrayList(1); + regionsOnServer = new ArrayList<>(1); map.put(server, regionsOnServer); } regionsOnServer.add(hri); @@ -220,11 +210,11 @@ public class RegionStates { * Get regions in transition and their states */ public synchronized Set getRegionsInTransition() { - return new HashSet(regionsInTransition.values()); + return new HashSet<>(regionsInTransition.values()); } public synchronized SortedSet getRegionsInTransitionOrderedByTimestamp() { - final TreeSet rit = new TreeSet(REGION_STATE_COMPARATOR); + final TreeSet rit = new TreeSet<>(REGION_STATE_COMPARATOR); for (RegionState rs: regionsInTransition.values()) { rit.add(rs); } @@ -404,7 +394,7 @@ public class RegionStates { RegionState oldState = regionStates.put(encodedName, regionState); Map map = regionStatesTableIndex.get(table); if (map == null) { - map = new HashMap(); + map = new HashMap<>(); regionStatesTableIndex.put(table, map); } map.put(encodedName, regionState); @@ -483,7 +473,7 @@ public class RegionStates { private void addToServerHoldings(ServerName serverName, HRegionInfo hri) { Set regions = serverHoldings.get(serverName); if (regions == null) { - regions = new HashSet(); + regions = new HashSet<>(); serverHoldings.put(serverName, regions); } regions.add(hri); @@ -494,7 +484,7 @@ public class RegionStates { Set replicas = defaultReplicaToOtherReplicas.get(defaultReplica); if (replicas == null) { - replicas = new HashSet(); + replicas = new HashSet<>(); defaultReplicaToOtherReplicas.put(defaultReplica, replicas); } replicas.add(hri); @@ -618,16 +608,16 @@ public class RegionStates { */ public List serverOffline(final ServerName sn) { // Offline all regions on this server not already in transition. - List rits = new ArrayList(); - Set regionsToCleanIfNoMetaEntry = new HashSet(); + List rits = new ArrayList<>(); + Set regionsToCleanIfNoMetaEntry = new HashSet<>(); // Offline regions outside the loop and synchronized block to avoid // ConcurrentModificationException and deadlock in case of meta anassigned, // but RegionState a blocked. 
- Set regionsToOffline = new HashSet(); + Set regionsToOffline = new HashSet<>(); synchronized (this) { Set assignedRegions = serverHoldings.get(sn); if (assignedRegions == null) { - assignedRegions = new HashSet(); + assignedRegions = new HashSet<>(); } for (HRegionInfo region : assignedRegions) { @@ -711,7 +701,7 @@ public class RegionStates { * @return Online regions from tableName */ public synchronized List getRegionsOfTable(TableName tableName) { - List tableRegions = new ArrayList(); + List tableRegions = new ArrayList<>(); // boundary needs to have table's name but regionID 0 so that it is sorted // before all table's regions. HRegionInfo boundary = new HRegionInfo(tableName, null, null, false, 0L); @@ -733,10 +723,9 @@ public class RegionStates { */ public synchronized Map> getRegionByStateOfTable(TableName tableName) { - Map> tableRegions = - new HashMap>(); + Map> tableRegions = new HashMap<>(); for (State state : State.values()) { - tableRegions.put(state, new ArrayList()); + tableRegions.put(state, new ArrayList<>()); } Map indexMap = regionStatesTableIndex.get(tableName); if (indexMap == null) @@ -774,7 +763,7 @@ public class RegionStates { * We loop through all regions assuming we don't delete tables too much. */ public void tableDeleted(final TableName tableName) { - Set regionsToDelete = new HashSet(); + Set regionsToDelete = new HashSet<>(); synchronized (this) { for (RegionState state: regionStates.values()) { HRegionInfo region = state.getRegion(); @@ -794,7 +783,7 @@ public class RegionStates { public synchronized Set getServerRegions(ServerName serverName) { Set regions = serverHoldings.get(serverName); if (regions == null) return null; - return new HashSet(regions); + return new HashSet<>(regions); } /** @@ -954,7 +943,7 @@ public class RegionStates { */ synchronized Map closeAllUserRegions(Set excludedTables) { boolean noExcludeTables = excludedTables == null || excludedTables.isEmpty(); - Set toBeClosed = new HashSet(regionStates.size()); + Set toBeClosed = new HashSet<>(regionStates.size()); for(RegionState state: regionStates.values()) { HRegionInfo hri = state.getRegion(); if (state.isSplit() || hri.isSplit()) { @@ -966,8 +955,7 @@ public class RegionStates { toBeClosed.add(hri); } } - Map allUserRegions = - new HashMap(toBeClosed.size()); + Map allUserRegions = new HashMap<>(toBeClosed.size()); for (HRegionInfo hri: toBeClosed) { RegionState regionState = updateRegionState(hri, State.CLOSED); allUserRegions.put(hri, regionState.getServerName()); @@ -1032,7 +1020,7 @@ public class RegionStates { for (Map> map: result.values()) { for (ServerName svr: onlineSvrs.keySet()) { if (!map.containsKey(svr)) { - map.put(svr, new ArrayList()); + map.put(svr, new ArrayList<>()); } } map.keySet().removeAll(drainingServers); @@ -1041,20 +1029,19 @@ public class RegionStates { } private Map>> getTableRSRegionMap(Boolean bytable){ - Map>> result = - new HashMap>>(); + Map>> result = new HashMap<>(); for (Map.Entry> e: serverHoldings.entrySet()) { for (HRegionInfo hri: e.getValue()) { if (hri.isMetaRegion()) continue; TableName tablename = bytable ? 
hri.getTable() : TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME); Map> svrToRegions = result.get(tablename); if (svrToRegions == null) { - svrToRegions = new HashMap>(serverHoldings.size()); + svrToRegions = new HashMap<>(serverHoldings.size()); result.put(tablename, svrToRegions); } List regions = svrToRegions.get(e.getKey()); if (regions == null) { - regions = new ArrayList(); + regions = new ArrayList<>(); svrToRegions.put(e.getKey(), regions); } regions.add(hri); @@ -1072,10 +1059,9 @@ public class RegionStates { * @return a Map of ServerName to a List of HRegionInfo's */ protected synchronized Map> getRegionAssignmentsByServer() { - Map> regionsByServer = - new HashMap>(serverHoldings.size()); + Map> regionsByServer = new HashMap<>(serverHoldings.size()); for (Map.Entry> e: serverHoldings.entrySet()) { - regionsByServer.put(e.getKey(), new ArrayList(e.getValue())); + regionsByServer.put(e.getKey(), new ArrayList<>(e.getValue())); } return regionsByServer; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index eb96f9788da..e6b60d83d1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -131,29 +131,25 @@ public class ServerManager { * The last flushed sequence id for a region. */ private final ConcurrentNavigableMap flushedSequenceIdByRegion = - new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); /** * The last flushed sequence id for a store in a region. */ private final ConcurrentNavigableMap> - storeFlushedSequenceIdsByRegion = - new ConcurrentSkipListMap>(Bytes.BYTES_COMPARATOR); + storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); /** Map of registered servers to their current load */ - private final ConcurrentNavigableMap onlineServers = - new ConcurrentSkipListMap(); + private final ConcurrentNavigableMap onlineServers = new ConcurrentSkipListMap<>(); /** * Map of admin interfaces per registered regionserver; these interfaces we use to control * regionservers out on the cluster */ - private final Map rsAdmins = - new HashMap(); + private final Map rsAdmins = new HashMap<>(); /** List of region servers that should not get any more new regions. */ - private final ArrayList drainingServers = - new ArrayList(); + private final ArrayList drainingServers = new ArrayList<>(); private final MasterServices master; private final ClusterConnection connection; @@ -182,7 +178,7 @@ public class ServerManager { * So this is a set of region servers known to be dead but not submitted to * ServerShutdownHandler for processing yet. */ - private Set queuedDeadServers = new HashSet(); + private Set queuedDeadServers = new HashSet<>(); /** * Set of region servers which are dead and submitted to ServerShutdownHandler to process but not @@ -199,11 +195,10 @@ public class ServerManager { * is currently in startup mode. In this case, the dead server will be parked in this set * temporarily. */ - private Map requeuedDeadServers - = new ConcurrentHashMap(); + private Map requeuedDeadServers = new ConcurrentHashMap<>(); /** Listeners that are called on server events. */ - private List listeners = new CopyOnWriteArrayList(); + private List listeners = new CopyOnWriteArrayList<>(); /** * Constructor. 
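The ServerManager hunks show that the diamond also applies when the constructor takes arguments, such as a comparator; inference is still driven by the declared field type. A self-contained sketch, using String keys and a stock comparator in place of byte[] keys and Bytes.BYTES_COMPARATOR:

import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

// Illustrative sketch; the real field maps encoded region names (byte[]) to
// flushed sequence ids and is ordered by Bytes.BYTES_COMPARATOR.
class FlushedSequenceIdsSketch {
    private final ConcurrentNavigableMap<String, Long> flushedSequenceIdByRegion =
        new ConcurrentSkipListMap<>(String.CASE_INSENSITIVE_ORDER);

    void record(String region, long seqId) {
        flushedSequenceIdByRegion.put(region, seqId);
    }
}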
@@ -1111,7 +1106,7 @@ public class ServerManager { public List getOnlineServersList() { // TODO: optimize the load balancer call so we don't need to make a new list // TODO: FIX. THIS IS POPULAR CALL. - return new ArrayList(this.onlineServers.keySet()); + return new ArrayList<>(this.onlineServers.keySet()); } /** @@ -1139,14 +1134,14 @@ public class ServerManager { * @return A copy of the internal list of draining servers. */ public List getDrainingServersList() { - return new ArrayList(this.drainingServers); + return new ArrayList<>(this.drainingServers); } /** * @return A copy of the internal set of deadNotExpired servers. */ Set getDeadNotExpiredServers() { - return new HashSet(this.queuedDeadServers); + return new HashSet<>(this.queuedDeadServers); } /** @@ -1287,11 +1282,9 @@ public class ServerManager { LOG.warn("Attempting to send favored nodes update rpc to server " + server.toString() + " failed because no RPC connection found to this server"); } else { - List>> regionUpdateInfos = - new ArrayList>>(); + List>> regionUpdateInfos = new ArrayList<>(); for (Entry> entry : favoredNodes.entrySet()) { - regionUpdateInfos.add(new Pair>(entry.getKey(), - entry.getValue())); + regionUpdateInfos.add(new Pair<>(entry.getKey(), entry.getValue())); } UpdateFavoredNodesRequest request = RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 8fedb408277..6e477bc80ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -80,19 +80,19 @@ public class SnapshotOfRegionAssignmentFromMeta { private final boolean excludeOfflinedSplitParents; public SnapshotOfRegionAssignmentFromMeta(Connection connection) { - this(connection, new HashSet(), false); + this(connection, new HashSet<>(), false); } public SnapshotOfRegionAssignmentFromMeta(Connection connection, Set disabledTables, boolean excludeOfflinedSplitParents) { this.connection = connection; - tableToRegionMap = new HashMap>(); - regionToRegionServerMap = new HashMap(); - currentRSToRegionMap = new HashMap>(); - primaryRSToRegionMap = new HashMap>(); - secondaryRSToRegionMap = new HashMap>(); - teritiaryRSToRegionMap = new HashMap>(); - regionNameToRegionInfoMap = new TreeMap(); + tableToRegionMap = new HashMap<>(); + regionToRegionServerMap = new HashMap<>(); + currentRSToRegionMap = new HashMap<>(); + primaryRSToRegionMap = new HashMap<>(); + secondaryRSToRegionMap = new HashMap<>(); + teritiaryRSToRegionMap = new HashMap<>(); + regionNameToRegionInfoMap = new TreeMap<>(); existingAssignmentPlan = new FavoredNodesPlan(); this.disabledTables = disabledTables; this.excludeOfflinedSplitParents = excludeOfflinedSplitParents; @@ -180,7 +180,7 @@ public class SnapshotOfRegionAssignmentFromMeta { TableName tableName = regionInfo.getTable(); List regionList = tableToRegionMap.get(tableName); if (regionList == null) { - regionList = new ArrayList(); + regionList = new ArrayList<>(); } // Add the current region info into the tableToRegionMap regionList.add(regionInfo); @@ -196,7 +196,7 @@ public class SnapshotOfRegionAssignmentFromMeta { // Process the region server to region map List regionList = currentRSToRegionMap.get(server); if (regionList == null) { - regionList = 
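The accessor changes above (getOnlineServersList, getDrainingServersList, getDeadNotExpiredServers) are all copy-on-read returns where the element type is now inferred from the declared return type. A minimal sketch with String standing in for ServerName:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;

// Illustrative sketch of the copy-on-read accessors in ServerManager.
class OnlineServersSketch {
    private final ConcurrentSkipListMap<String, Integer> onlineServers = new ConcurrentSkipListMap<>();

    List<String> getOnlineServersList() {
        // returns a snapshot copy; caller mutations do not affect the live map
        return new ArrayList<>(this.onlineServers.keySet());
    }
}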
new ArrayList(); + regionList = new ArrayList<>(); } regionList.add(regionInfo); currentRSToRegionMap.put(server, regionList); @@ -206,7 +206,7 @@ public class SnapshotOfRegionAssignmentFromMeta { // Process the region server to region map List regionList = primaryRSToRegionMap.get(server); if (regionList == null) { - regionList = new ArrayList(); + regionList = new ArrayList<>(); } regionList.add(regionInfo); primaryRSToRegionMap.put(server, regionList); @@ -216,7 +216,7 @@ public class SnapshotOfRegionAssignmentFromMeta { // Process the region server to region map List regionList = secondaryRSToRegionMap.get(server); if (regionList == null) { - regionList = new ArrayList(); + regionList = new ArrayList<>(); } regionList.add(regionInfo); secondaryRSToRegionMap.put(server, regionList); @@ -226,7 +226,7 @@ public class SnapshotOfRegionAssignmentFromMeta { // Process the region server to region map List regionList = teritiaryRSToRegionMap.get(server); if (regionList == null) { - regionList = new ArrayList(); + regionList = new ArrayList<>(); } regionList.add(regionInfo); teritiaryRSToRegionMap.put(server, regionList); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 20fef3598d0..7017d29537c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -118,7 +118,7 @@ public class SplitLogManager { protected final ReentrantLock recoveringRegionLock = new ReentrantLock(); @VisibleForTesting - final ConcurrentMap tasks = new ConcurrentHashMap(); + final ConcurrentMap tasks = new ConcurrentHashMap<>(); private TimeoutMonitor timeoutMonitor; private volatile Set deadWorkers = null; @@ -176,7 +176,7 @@ public class SplitLogManager { public static FileStatus[] getFileList(final Configuration conf, final List logDirs, final PathFilter filter) throws IOException { - List fileStatus = new ArrayList(); + List fileStatus = new ArrayList<>(); for (Path logDir : logDirs) { final FileSystem fs = logDir.getFileSystem(conf); if (!fs.exists(logDir)) { @@ -201,7 +201,7 @@ public class SplitLogManager { * @throws IOException */ public long splitLogDistributed(final Path logDir) throws IOException { - List logDirs = new ArrayList(); + List logDirs = new ArrayList<>(); logDirs.add(logDir); return splitLogDistributed(logDirs); } @@ -218,7 +218,7 @@ public class SplitLogManager { if (logDirs.isEmpty()) { return 0; } - Set serverNames = new HashSet(); + Set serverNames = new HashSet<>(); for (Path logDir : logDirs) { try { ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(logDir); @@ -398,7 +398,7 @@ public class SplitLogManager { } if (serverNames == null || serverNames.isEmpty()) return; - Set recoveredServerNameSet = new HashSet(); + Set recoveredServerNameSet = new HashSet<>(); for (ServerName tmpServerName : serverNames) { recoveredServerNameSet.add(tmpServerName.getServerName()); } @@ -410,8 +410,7 @@ public class SplitLogManager { } catch (IOException e) { LOG.warn("removeRecoveringRegions got exception. 
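Two further variants appear throughout the SplitLogManager hunks: diamond with an initial-capacity argument and diamond with a source collection. Both are sketched below, with plain String elements in place of ServerName and Path:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative sketch of the two constructor-argument variants above.
class DiamondVariantsSketch {
    // capacity hint only; the element type still comes from the declaration
    private final Set<String> deadWorkers = new HashSet<>(100);

    List<String> retryList(Set<String> failedDeletions) {
        // copy constructor; element type inferred from the declared return type
        return new ArrayList<>(failedDeletions);
    }
}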
Will retry", e); if (serverNames != null && !serverNames.isEmpty()) { - this.failedRecoveringRegionDeletions.add(new Pair, Boolean>(serverNames, - isMetaRecovery)); + this.failedRecoveringRegionDeletions.add(new Pair<>(serverNames, isMetaRecovery)); } } finally { this.recoveringRegionLock.unlock(); @@ -426,7 +425,7 @@ public class SplitLogManager { */ void removeStaleRecoveringRegions(final Set failedServers) throws IOException, InterruptedIOException { - Set knownFailedServers = new HashSet(); + Set knownFailedServers = new HashSet<>(); if (failedServers != null) { for (ServerName tmpServerName : failedServers) { knownFailedServers.add(tmpServerName.getServerName()); @@ -519,7 +518,7 @@ public class SplitLogManager { // to reason about concurrency. Makes it easier to retry. synchronized (deadWorkersLock) { if (deadWorkers == null) { - deadWorkers = new HashSet(100); + deadWorkers = new HashSet<>(100); } deadWorkers.add(workerName); } @@ -529,7 +528,7 @@ public class SplitLogManager { void handleDeadWorkers(Set serverNames) { synchronized (deadWorkersLock) { if (deadWorkers == null) { - deadWorkers = new HashSet(100); + deadWorkers = new HashSet<>(100); } deadWorkers.addAll(serverNames); } @@ -749,7 +748,7 @@ public class SplitLogManager { getSplitLogManagerCoordination().getDetails().getFailedDeletions(); // Retry previously failed deletes if (failedDeletions.size() > 0) { - List tmpPaths = new ArrayList(failedDeletions); + List tmpPaths = new ArrayList<>(failedDeletions); for (String tmpPath : tmpPaths) { // deleteNode is an async call getSplitLogManagerCoordination().deleteTask(tmpPath); @@ -766,7 +765,7 @@ public class SplitLogManager { // inside the function there have more checks before GC anything if (!failedRecoveringRegionDeletions.isEmpty()) { List, Boolean>> previouslyFailedDeletions = - new ArrayList, Boolean>>(failedRecoveringRegionDeletions); + new ArrayList<>(failedRecoveringRegionDeletions); failedRecoveringRegionDeletions.removeAll(previouslyFailedDeletions); for (Pair, Boolean> failedDeletion : previouslyFailedDeletions) { removeRecoveringRegions(failedDeletion.getFirst(), failedDeletion.getSecond()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index f6ae9af729e..b0e088c0400 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -73,7 +73,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { protected static final int MIN_SERVER_BALANCE = 2; private volatile boolean stopped = false; - private static final List EMPTY_REGION_LIST = new ArrayList(0); + private static final List EMPTY_REGION_LIST = new ArrayList<>(0); static final Predicate IDLE_SERVER_PREDICATOR = load -> load.getNumberOfRegions() == 0; @@ -187,19 +187,19 @@ public abstract class BaseLoadBalancer implements LoadBalancer { unassignedRegions = EMPTY_REGION_LIST; } - serversToIndex = new HashMap(); - hostsToIndex = new HashMap(); - racksToIndex = new HashMap(); - tablesToIndex = new HashMap(); + serversToIndex = new HashMap<>(); + hostsToIndex = new HashMap<>(); + racksToIndex = new HashMap<>(); + tablesToIndex = new HashMap<>(); //TODO: We should get the list of tables from master - tables = new ArrayList(); + tables = new ArrayList<>(); this.rackManager = rackManager != null ? 
rackManager : new DefaultRackManager(); numRegions = 0; - List> serversPerHostList = new ArrayList>(); - List> serversPerRackList = new ArrayList>(); + List> serversPerHostList = new ArrayList<>(); + List> serversPerRackList = new ArrayList<>(); this.clusterState = clusterState; this.regionFinder = regionFinder; @@ -211,7 +211,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } if (!hostsToIndex.containsKey(sn.getHostname())) { hostsToIndex.put(sn.getHostname(), numHosts++); - serversPerHostList.add(new ArrayList(1)); + serversPerHostList.add(new ArrayList<>(1)); } int serverIndex = serversToIndex.get(sn.getHostAndPort()); @@ -221,7 +221,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { String rack = this.rackManager.getRack(sn); if (!racksToIndex.containsKey(rack)) { racksToIndex.put(rack, numRacks++); - serversPerRackList.add(new ArrayList()); + serversPerRackList.add(new ArrayList<>()); } int rackIndex = racksToIndex.get(rack); serversPerRackList.get(rackIndex).add(serverIndex); @@ -233,7 +233,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } numRegions += unassignedRegions.size(); - regionsToIndex = new HashMap(numRegions); + regionsToIndex = new HashMap<>(numRegions); servers = new ServerName[numServers]; serversPerHost = new int[numHosts][]; serversPerRack = new int[numRacks][]; @@ -1064,7 +1064,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { public static final String TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster"; - protected final Set tablesOnMaster = new HashSet(); + protected final Set tablesOnMaster = new HashSet<>(); protected MetricsBalancer metricsBalancer = null; protected ClusterStatus clusterStatus = null; protected ServerName masterServerName; @@ -1170,7 +1170,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { // Move this region away from the master regionserver RegionPlan plan = new RegionPlan(region, masterServerName, dest); if (plans == null) { - plans = new ArrayList(); + plans = new ArrayList<>(); } plans.add(plan); } @@ -1183,7 +1183,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { // Move this region to the master regionserver RegionPlan plan = new RegionPlan(region, server.getKey(), masterServerName); if (plans == null) { - plans = new ArrayList(); + plans = new ArrayList<>(); } plans.add(plan); } @@ -1199,10 +1199,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer { if (servers == null || regions == null || regions.isEmpty()) { return null; } - Map> assignments - = new TreeMap>(); + Map> assignments = new TreeMap<>(); if (masterServerName != null && servers.contains(masterServerName)) { - assignments.put(masterServerName, new ArrayList()); + assignments.put(masterServerName, new ArrayList<>()); for (HRegionInfo region: regions) { if (shouldBeOnMaster(region)) { assignments.get(masterServerName).add(region); @@ -1303,12 +1302,12 @@ public abstract class BaseLoadBalancer implements LoadBalancer { metricsBalancer.incrMiscInvocations(); Map> assignments = assignMasterRegions(regions, servers); if (assignments != null && !assignments.isEmpty()) { - servers = new ArrayList(servers); + servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); List masterRegions = assignments.get(masterServerName); if (!masterRegions.isEmpty()) { - regions = new ArrayList(regions); + regions = new ArrayList<>(regions); for (HRegionInfo region: masterRegions) { regions.remove(region); 
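The BaseLoadBalancer hunks include nested generics, where the inner allocations gain the most: the compiler infers the inner type argument from the parameter of add() or put(). An illustrative sketch of the serversPerHostList case, with Integer server indexes:

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch; the outer type is written once, every inner allocation
// uses the diamond because add()'s parameter fixes the element type.
class ServersPerHostSketch {
    private final List<List<Integer>> serversPerHostList = new ArrayList<>();

    void addHost(int firstServerIndex) {
        List<Integer> serversOnHost = new ArrayList<>(1);  // usually one server per host
        serversOnHost.add(firstServerIndex);
        serversPerHostList.add(serversOnHost);
    }
}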
} @@ -1331,17 +1330,17 @@ public abstract class BaseLoadBalancer implements LoadBalancer { if (numServers == 1) { // Only one server, nothing fancy we can do here ServerName server = servers.get(0); - assignments.put(server, new ArrayList(regions)); + assignments.put(server, new ArrayList<>(regions)); return assignments; } Cluster cluster = createCluster(servers, regions, false); - List unassignedRegions = new ArrayList(); + List unassignedRegions = new ArrayList<>(); roundRobinAssignment(cluster, regions, unassignedRegions, servers, assignments); - List lastFewRegions = new ArrayList(); + List lastFewRegions = new ArrayList<>(); // assign the remaining by going through the list and try to assign to servers one-by-one int serverIdx = RANDOM.nextInt(numServers); for (HRegionInfo region : unassignedRegions) { @@ -1351,7 +1350,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { if (!cluster.wouldLowerAvailability(region, serverName)) { List serverRegions = assignments.get(serverName); if (serverRegions == null) { - serverRegions = new ArrayList(); + serverRegions = new ArrayList<>(); assignments.put(serverName, serverRegions); } serverRegions.add(region); @@ -1372,7 +1371,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { ServerName server = servers.get(i); List serverRegions = assignments.get(server); if (serverRegions == null) { - serverRegions = new ArrayList(); + serverRegions = new ArrayList<>(); assignments.put(server, serverRegions); } serverRegions.add(region); @@ -1416,7 +1415,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { if (shouldBeOnMaster(regionInfo)) { return masterServerName; } - servers = new ArrayList(servers); + servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); } @@ -1465,12 +1464,12 @@ public abstract class BaseLoadBalancer implements LoadBalancer { Map> assignments = assignMasterRegions(regions.keySet(), servers); if (assignments != null && !assignments.isEmpty()) { - servers = new ArrayList(servers); + servers = new ArrayList<>(servers); // Guarantee not to put other regions on master servers.remove(masterServerName); List masterRegions = assignments.get(masterServerName); if (!masterRegions.isEmpty()) { - regions = new HashMap(regions); + regions = new HashMap<>(regions); for (HRegionInfo region: masterRegions) { regions.remove(region); } @@ -1487,7 +1486,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } if (numServers == 1) { // Only one server, nothing fancy we can do here ServerName server = servers.get(0); - assignments.put(server, new ArrayList(regions.keySet())); + assignments.put(server, new ArrayList<>(regions.keySet())); return assignments; } @@ -1499,7 +1498,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { // servers on the same host on different ports. 
ArrayListMultimap serversByHostname = ArrayListMultimap.create(); for (ServerName server : servers) { - assignments.put(server, new ArrayList()); + assignments.put(server, new ArrayList<>()); serversByHostname.put(server.getHostname(), server); } @@ -1516,7 +1515,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { for (Map.Entry entry : regions.entrySet()) { HRegionInfo region = entry.getKey(); ServerName oldServerName = entry.getValue(); - List localServers = new ArrayList(); + List localServers = new ArrayList<>(); if (oldServerName != null) { localServers = serversByHostname.get(oldServerName.getHostname()); } @@ -1629,7 +1628,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { for (int j = 0; j < numServers; j++) { ServerName server = servers.get((j + serverIdx) % numServers); - List serverRegions = new ArrayList(max); + List serverRegions = new ArrayList<>(max); for (int i = regionIdx; i < numRegions; i += numServers) { HRegionInfo region = regions.get(i % numRegions); if (cluster.wouldLowerAvailability(region, server)) { @@ -1649,7 +1648,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { if (this.services != null && this.services.getAssignmentManager() != null) { return this.services.getAssignmentManager().getSnapShotOfAssignment(regions); } else { - return new HashMap>(); + return new HashMap<>(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java index e5f0e3b36a9..8c337bdf79a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java @@ -39,7 +39,7 @@ public class ClusterLoadState { this.numRegions = 0; this.numServers = clusterState.size(); this.clusterState = clusterState; - serversByLoad = new TreeMap>(); + serversByLoad = new TreeMap<>(); // Iterate so we can count regions as we build the map for (Map.Entry> server : clusterState.entrySet()) { List regions = server.getValue(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index d5edfab5d28..f7e166d54e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -179,7 +179,7 @@ class RegionLocationFinder { */ protected List getTopBlockLocations(HRegionInfo region, String currentHost) { HDFSBlocksDistribution blocksDistribution = getBlockDistribution(region); - List topHosts = new ArrayList(); + List topHosts = new ArrayList<>(); for (String host : blocksDistribution.getTopHosts()) { if (host.equals(currentHost)) { break; @@ -250,15 +250,15 @@ class RegionLocationFinder { return Lists.newArrayList(); } - List topServerNames = new ArrayList(); + List topServerNames = new ArrayList<>(); Collection regionServers = status.getServers(); // create a mapping from hostname to ServerName for fast lookup - HashMap> hostToServerName = new HashMap>(); + HashMap> hostToServerName = new HashMap<>(); for (ServerName sn : regionServers) { String host = sn.getHostname(); if (!hostToServerName.containsKey(host)) { - hostToServerName.put(host, new ArrayList()); + hostToServerName.put(host, new ArrayList<>()); } 
hostToServerName.get(host).add(sn); } @@ -309,8 +309,7 @@ class RegionLocationFinder { } public void refreshAndWait(Collection hris) { - ArrayList> regionLocationFutures = - new ArrayList>(hris.size()); + ArrayList> regionLocationFutures = new ArrayList<>(hris.size()); for (HRegionInfo hregionInfo : hris) { regionLocationFutures.add(asyncGetBlockDistribution(hregionInfo)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index a6a0774e916..7e8d6968a41 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -255,7 +255,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { if (clusterMap.size() <= 2) { return null; } - clusterMap = new HashMap>(clusterMap); + clusterMap = new HashMap<>(clusterMap); clusterMap.remove(masterServerName); } @@ -285,14 +285,13 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { // TODO: Look at data block locality or a more complex load to do this MinMaxPriorityQueue regionsToMove = MinMaxPriorityQueue.orderedBy(rpComparator).create(); - regionsToReturn = new ArrayList(); + regionsToReturn = new ArrayList<>(); // Walk down most loaded, pruning each to the max int serversOverloaded = 0; // flag used to fetch regions from head and tail of list, alternately boolean fetchFromTail = false; - Map serverBalanceInfo = - new TreeMap(); + Map serverBalanceInfo = new TreeMap<>(); for (Map.Entry> server: serversByLoad.descendingMap().entrySet()) { ServerAndLoad sal = server.getKey(); @@ -330,7 +329,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { int neededRegions = 0; // number of regions needed to bring all up to min fetchFromTail = false; - Map underloadedServers = new HashMap(); + Map underloadedServers = new HashMap<>(); int maxToTake = numRegions - min; for (Map.Entry> server: serversByLoad.entrySet()) { @@ -524,8 +523,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { // A structure help to map ServerName to it's load and index in ServerLoadList Map> SnLoadMap = new HashMap<>(); for (int i = 0; i < serverLoadList.size(); i++) { - SnLoadMap.put(serverLoadList.get(i).getServerName(), - new Pair(serverLoadList.get(i), i)); + SnLoadMap.put(serverLoadList.get(i).getServerName(), new Pair<>(serverLoadList.get(i), i)); } Pair shredLoad; // A List to help mark the plan in regionsToMove that should be removed diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index f2329bb1eb0..f68afb6e7c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -121,7 +121,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private static final Random RANDOM = new Random(System.currentTimeMillis()); private static final Log LOG = LogFactory.getLog(StochasticLoadBalancer.class); - Map> loads = new HashMap>(); + Map> loads = new HashMap<>(); // values are defaults private int maxSteps = 1000000; @@ -332,7 +332,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { if (clusterState.size() <= 2) { return null; } - 
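The SnLoadMap hunk in SimpleLoadBalancer replaces a fully spelled-out Pair construction with new Pair<>(...), both type arguments being inferred from the map's declared value type. Since Pair is HBase-specific, the sketch below uses AbstractMap.SimpleImmutableEntry, with Integer standing in for ServerAndLoad and for the list index:

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only; SimpleImmutableEntry stands in for HBase's Pair.
class LoadIndexSketch {
    private final Map<String, SimpleImmutableEntry<Integer, Integer>> snLoadMap = new HashMap<>();

    void record(String serverName, int load, int indexInLoadList) {
        // both type arguments of the entry are inferred from the map's value type
        snLoadMap.put(serverName, new SimpleImmutableEntry<>(load, indexInLoadList));
    }
}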
clusterState = new HashMap>(clusterState); + clusterState = new HashMap<>(clusterState); clusterState.remove(masterServerName); } @@ -482,7 +482,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { * @return List of RegionPlan's that represent the moves needed to get to desired final state. */ private List createRegionPlans(Cluster cluster) { - List plans = new LinkedList(); + List plans = new LinkedList<>(); for (int regionIndex = 0; regionIndex < cluster.regionIndexToServerIndex.length; regionIndex++) { int initialServerIndex = cluster.initialRegionIndexToServerIndex[regionIndex]; @@ -511,7 +511,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { // We create a new hashmap so that regions that are no longer there are removed. // However we temporarily need the old loads so we can use them to keep the rolling average. Map> oldLoads = loads; - loads = new HashMap>(); + loads = new HashMap<>(); for (ServerName sn : clusterStatus.getServers()) { ServerLoad sl = clusterStatus.getLoad(sn); @@ -522,7 +522,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { Deque rLoads = oldLoads.get(Bytes.toString(entry.getKey())); if (rLoads == null) { // There was nothing there - rLoads = new ArrayDeque(); + rLoads = new ArrayDeque<>(); } else if (rLoads.size() >= numRegionLoadsToRemember) { rLoads.remove(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java index c6b6f620831..dddad36a96c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java @@ -92,7 +92,7 @@ public abstract class CleanerChore extends Schedu * @param confKey key to get the file cleaner classes from the configuration */ private void initCleanerChain(String confKey) { - this.cleanersChain = new LinkedList(); + this.cleanersChain = new LinkedList<>(); String[] logCleaners = conf.getStrings(confKey); if (logCleaners != null) { for (String className : logCleaners) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java index 5c562718aef..45b2401701d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java @@ -74,7 +74,7 @@ public class ReplicationMetaCleaner extends ScheduledChore { } } if (hasSerialScope) { - serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet()); + serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet<>()); } } if (serialTables.isEmpty()){ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java index c0a1b753f95..dafc4f8f820 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java @@ -82,7 +82,7 @@ public class ReplicationZKNodeCleaner { for (String queueId : queueIds) { ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId); if 
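The StochasticLoadBalancer load-history hunks combine a map with Deque values and per-region ArrayDeque allocations, all taking the diamond. A self-contained sketch of that bookkeeping; the cap of 15 is an assumed example value, not necessarily the balancer's actual numRegionLoadsToRemember default:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch; String region names and Long loads stand in for the
// encoded region name keys and BalancerRegionLoad values.
class RegionLoadHistorySketch {
    private static final int MAX_LOADS = 15;  // assumed example cap
    private final Map<String, Deque<Long>> loads = new HashMap<>();

    void record(String region, long load) {
        Deque<Long> history = loads.get(region);
        if (history == null) {
            history = new ArrayDeque<>();        // there was nothing there
        } else if (history.size() >= MAX_LOADS) {
            history.remove();                    // keep a rolling window
        }
        history.add(load);
        loads.put(region, history);
    }
}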
(!peerIds.contains(queueInfo.getPeerId())) { - undeletedQueues.computeIfAbsent(replicator, (key) -> new ArrayList()).add( + undeletedQueues.computeIfAbsent(replicator, (key) -> new ArrayList<>()).add( queueId); if (LOG.isDebugEnabled()) { LOG.debug("Undeleted replication queue for removed peer found: " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java index c4b49f0bd5a..8e490ebafc5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java @@ -477,4 +477,4 @@ public final class LockProcedure extends Procedure env.getProcedureScheduler().wakeRegions(LockProcedure.this, tableName, regionInfos); } } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 5c67258eb95..9c8358b8181 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -118,7 +118,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer { return null; } - List plans = new ArrayList(); + List plans = new ArrayList<>(); List tableRegions = masterServices.getAssignmentManager().getRegionStates(). getRegionsOfTable(table); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index aefd14c1dfa..347d01d8215 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -70,8 +70,7 @@ public class CloneSnapshotProcedure private HTableDescriptor hTableDescriptor; private SnapshotDescription snapshot; private List newRegions = null; - private Map > parentsToChildrenPairMap = - new HashMap>(); + private Map > parentsToChildrenPairMap = new HashMap<>(); // Monitor private MonitoredTask monitorStatus = null; @@ -264,18 +263,18 @@ public class CloneSnapshotProcedure if (cloneSnapshotMsg.getRegionInfoCount() == 0) { newRegions = null; } else { - newRegions = new ArrayList(cloneSnapshotMsg.getRegionInfoCount()); + newRegions = new ArrayList<>(cloneSnapshotMsg.getRegionInfoCount()); for (HBaseProtos.RegionInfo hri: cloneSnapshotMsg.getRegionInfoList()) { newRegions.add(HRegionInfo.convert(hri)); } } if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) { - parentsToChildrenPairMap = new HashMap>(); + parentsToChildrenPairMap = new HashMap<>(); for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair: cloneSnapshotMsg.getParentToChildRegionsPairListList()) { parentsToChildrenPairMap.put( parentToChildrenPair.getParentRegionName(), - new Pair( + new Pair<>( parentToChildrenPair.getChild1RegionName(), parentToChildrenPair.getChild2RegionName())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 2421dfcf52b..ced7abc28d2 100644 --- 
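The ReplicationZKNodeCleaner hunk shows the diamond inside a lambda passed to computeIfAbsent; the type argument is resolved against the map's declared value type. A sketch with String keys and queue ids standing in for the replicator's ServerName and its replication queue ids:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch of the computeIfAbsent change above.
class UndeletedQueuesSketch {
    private final Map<String, List<String>> undeletedQueues = new HashMap<>();

    void add(String replicator, String queueId) {
        // new ArrayList<>() is inferred as ArrayList<String> from the map's value type
        undeletedQueues.computeIfAbsent(replicator, key -> new ArrayList<>()).add(queueId);
    }
}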
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -208,7 +208,7 @@ public class CreateTableProcedure if (state.getRegionInfoCount() == 0) { newRegions = null; } else { - newRegions = new ArrayList(state.getRegionInfoCount()); + newRegions = new ArrayList<>(state.getRegionInfoCount()); for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) { newRegions.add(HRegionInfo.convert(hri)); } @@ -364,8 +364,7 @@ public class CreateTableProcedure if (numRegionReplicas <= 0) { return regions; } - List hRegionInfos = - new ArrayList((numRegionReplicas+1)*regions.size()); + List hRegionInfos = new ArrayList<>((numRegionReplicas+1)*regions.size()); for (int i = 0; i < regions.size(); i++) { for (int j = 1; j <= numRegionReplicas; j++) { hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), j)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 06b666b47e0..9d0a283c6a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -221,7 +221,7 @@ public class DeleteTableProcedure if (state.getRegionInfoCount() == 0) { regions = null; } else { - regions = new ArrayList(state.getRegionInfoCount()); + regions = new ArrayList<>(state.getRegionInfoCount()); for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) { regions.add(HRegionInfo.convert(hri)); } @@ -343,7 +343,7 @@ public class DeleteTableProcedure Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { - List deletes = new ArrayList(); + List deletes = new ArrayList<>(); try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { for (Result result : resScanner) { deletes.add(new Delete(result.getRow())); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index f4ecf153567..4d67eddc906 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -357,7 +357,7 @@ public class EnableTableProcedure // need to potentially create some regions for the replicas List unrecordedReplicas = - AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet( + AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet<>( regionsToAssign.keySet()), masterServices); Map> srvToUnassignedRegs = assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas, @@ -464,8 +464,7 @@ public class EnableTableProcedure private static Map regionsToAssignWithServerName( final MasterProcedureEnv env, final List> regionsInMeta) throws IOException { - Map regionsToAssign = - new HashMap(regionsInMeta.size()); + Map regionsToAssign = new HashMap<>(regionsInMeta.size()); RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates(); for (Pair regionLocation : regionsInMeta) { HRegionInfo hri = regionLocation.getFirst(); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java index 980bf9481c6..4b9a7ab9e08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java @@ -97,12 +97,12 @@ public final class MasterDDLOperationHelper { regionLocations = locator.getAllRegionLocations(); } // Convert List to Map. - NavigableMap hri2Sn = new TreeMap(); + NavigableMap hri2Sn = new TreeMap<>(); for (HRegionLocation location : regionLocations) { hri2Sn.put(location.getRegionInfo(), location.getServerName()); } TreeMap> serverToRegions = Maps.newTreeMap(); - List reRegions = new ArrayList(); + List reRegions = new ArrayList<>(); for (HRegionInfo hri : regionInfoList) { ServerName sn = hri2Sn.get(hri); // Skip the offlined split parent region diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java index d7fe5f6bddb..366378aac26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java @@ -678,7 +678,7 @@ public class MergeTableRegionsProcedure final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { @MetaMutationAnnotation - final List metaEntries = new ArrayList(); + final List metaEntries = new ArrayList<>(); boolean ret = cpHost.preMergeRegionsCommit(regionsToMerge, metaEntries, getUser()); if (ret) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index f1b411abcf2..6a70f6238d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -337,7 +337,7 @@ public class ModifyTableProcedure final int newReplicaCount = newHTableDescriptor.getRegionReplication(); if (newReplicaCount < oldReplicaCount) { - Set tableRows = new HashSet(); + Set tableRows = new HashSet<>(); Connection connection = env.getMasterServices().getConnection(); Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName()); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index d99bd6b0ccb..f8c9d8f95c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -66,8 +66,7 @@ public class RestoreSnapshotProcedure private List regionsToRestore = null; private List regionsToRemove = null; private List regionsToAdd = null; - private Map> parentsToChildrenPairMap = - new HashMap>(); + private Map> parentsToChildrenPairMap = new HashMap<>(); private SnapshotDescription snapshot; @@ -275,8 +274,7 @@ public class 
RestoreSnapshotProcedure if (restoreSnapshotMsg.getRegionInfoForRestoreCount() == 0) { regionsToRestore = null; } else { - regionsToRestore = - new ArrayList(restoreSnapshotMsg.getRegionInfoForRestoreCount()); + regionsToRestore = new ArrayList<>(restoreSnapshotMsg.getRegionInfoForRestoreCount()); for (HBaseProtos.RegionInfo hri: restoreSnapshotMsg.getRegionInfoForRestoreList()) { regionsToRestore.add(HRegionInfo.convert(hri)); } @@ -284,8 +282,7 @@ public class RestoreSnapshotProcedure if (restoreSnapshotMsg.getRegionInfoForRemoveCount() == 0) { regionsToRemove = null; } else { - regionsToRemove = - new ArrayList(restoreSnapshotMsg.getRegionInfoForRemoveCount()); + regionsToRemove = new ArrayList<>(restoreSnapshotMsg.getRegionInfoForRemoveCount()); for (HBaseProtos.RegionInfo hri: restoreSnapshotMsg.getRegionInfoForRemoveList()) { regionsToRemove.add(HRegionInfo.convert(hri)); } @@ -293,7 +290,7 @@ public class RestoreSnapshotProcedure if (restoreSnapshotMsg.getRegionInfoForAddCount() == 0) { regionsToAdd = null; } else { - regionsToAdd = new ArrayList(restoreSnapshotMsg.getRegionInfoForAddCount()); + regionsToAdd = new ArrayList<>(restoreSnapshotMsg.getRegionInfoForAddCount()); for (HBaseProtos.RegionInfo hri: restoreSnapshotMsg.getRegionInfoForAddList()) { regionsToAdd.add(HRegionInfo.convert(hri)); } @@ -303,7 +300,7 @@ public class RestoreSnapshotProcedure restoreSnapshotMsg.getParentToChildRegionsPairListList()) { parentsToChildrenPairMap.put( parentToChildrenPair.getParentRegionName(), - new Pair( + new Pair<>( parentToChildrenPair.getChild1RegionName(), parentToChildrenPair.getChild2RegionName())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 7b4eb6e0155..484deccfc25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -99,7 +99,7 @@ implements ServerProcedureInterface { public static final int DEFAULT_WAIT_ON_RIT = 30000; - private static final Set META_REGION_SET = new HashSet(); + private static final Set META_REGION_SET = new HashSet<>(); static { META_REGION_SET.add(HRegionInfo.FIRST_META_REGIONINFO); } @@ -424,7 +424,7 @@ implements ServerProcedureInterface { private List calcRegionsToAssign(final MasterProcedureEnv env) throws IOException { AssignmentManager am = env.getMasterServices().getAssignmentManager(); - List regionsToAssignAggregator = new ArrayList(); + List regionsToAssignAggregator = new ArrayList<>(); int replicaCount = env.getMasterConfiguration().getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); for (int i = 1; i < replicaCount; i++) { @@ -625,14 +625,14 @@ implements ServerProcedureInterface { this.shouldSplitWal = state.getShouldSplitWal(); int size = state.getRegionsOnCrashedServerCount(); if (size > 0) { - this.regionsOnCrashedServer = new HashSet(size); + this.regionsOnCrashedServer = new HashSet<>(size); for (RegionInfo ri: state.getRegionsOnCrashedServerList()) { this.regionsOnCrashedServer.add(HRegionInfo.convert(ri)); } } size = state.getRegionsAssignedCount(); if (size > 0) { - this.regionsAssigned = new ArrayList(size); + this.regionsAssigned = new ArrayList<>(size); for (RegionInfo ri: state.getRegionsOnCrashedServerList()) { this.regionsAssigned.add(HRegionInfo.convert(ri)); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java index 69b89be705c..3cd6c66b3cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java @@ -547,7 +547,7 @@ public class SplitTableRegionProcedure } if (nbFiles == 0) { // no file needs to be splitted. - return new Pair(0,0); + return new Pair<>(0,0); } // Default max #threads to use is the smaller of table's configured number of blocking store // files or the available number of logical cores. @@ -561,7 +561,7 @@ public class SplitTableRegionProcedure " using " + maxThreads + " threads"); ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool( maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d")); - List>> futures = new ArrayList>> (nbFiles); + List>> futures = new ArrayList<>(nbFiles); // Split each store file. final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); @@ -617,7 +617,7 @@ public class SplitTableRegionProcedure LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles."); } - return new Pair(daughterA, daughterB); + return new Pair<>(daughterA, daughterB); } private void assertReferenceFileCount( @@ -646,7 +646,7 @@ public class SplitTableRegionProcedure if (LOG.isDebugEnabled()) { LOG.debug("Splitting complete for store file: " + sf.getPath() + " for region: " + parentHRI); } - return new Pair(path_first, path_second); + return new Pair<>(path_first, path_second); } /** @@ -684,7 +684,7 @@ public class SplitTableRegionProcedure **/ private void preSplitRegionBeforePONR(final MasterProcedureEnv env) throws IOException, InterruptedException { - final List metaEntries = new ArrayList(); + final List metaEntries = new ArrayList<>(); final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { if (cpHost.preSplitBeforePONRAction(getSplitRow(), metaEntries, getUser())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java index 74828317448..2ab142aeaa0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -250,7 +250,7 @@ public class TruncateTableProcedure if (state.getRegionInfoCount() == 0) { regions = null; } else { - regions = new ArrayList(state.getRegionInfoCount()); + regions = new ArrayList<>(state.getRegionInfoCount()); for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) { regions.add(HRegionInfo.convert(hri)); } @@ -258,7 +258,7 @@ public class TruncateTableProcedure } private static List recreateRegionInfo(final List regions) { - ArrayList newRegions = new ArrayList(regions.size()); + ArrayList newRegions = new ArrayList<>(regions.size()); for (HRegionInfo hri: regions) { newRegions.add(new HRegionInfo(hri.getTable(), hri.getStartKey(), hri.getEndKey())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java 
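In SplitTableRegionProcedure the futures list is a deeply nested generic, which is where the diamond removes the most repetition. A compilable sketch, with Future<String> standing in for Future<Pair<Path, Path>> and an externally supplied pool in place of the StoreFileSplitter executor:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

// Illustrative sketch of the sized futures list in the hunk above.
class StoreFileSplitterSketch {
    List<Future<String>> submitAll(ExecutorService threadPool, List<Callable<String>> splitTasks) {
        List<Future<String>> futures = new ArrayList<>(splitTasks.size());
        for (Callable<String> task : splitTasks) {
            futures.add(threadPool.submit(task));
        }
        return futures;
    }
}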
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java index 113ce583c19..5f86e08077e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java @@ -74,7 +74,7 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler { // 1. get all the regions hosting this table. // extract each pair to separate lists - Set regions = new HashSet(); + Set regions = new HashSet<>(); for (Pair p : regionsAndLocations) { // Don't include non-default regions HRegionInfo hri = p.getFirst(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java index e63727a5a24..73cd4d7556e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java @@ -71,7 +71,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler { @Override protected void snapshotRegions(List> regions) throws HBaseSnapshotException, IOException { - Set regionServers = new HashSet(regions.size()); + Set regionServers = new HashSet<>(regions.size()); for (Pair region : regions) { if (region != null && region.getFirst() != null && region.getSecond() != null) { HRegionInfo hri = region.getFirst(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java index f03344c3a34..b6641de004f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java @@ -90,13 +90,12 @@ public class SnapshotFileCache implements Stoppable { private final FileSystem fs; private final SnapshotFileInspector fileInspector; private final Path snapshotDir; - private final Set cache = new HashSet(); + private final Set cache = new HashSet<>(); /** * This is a helper map of information about the snapshot directories so we don't need to rescan * them if they haven't changed since the last time we looked. */ - private final Map snapshots = - new HashMap(); + private final Map snapshots = new HashMap<>(); private final Timer refreshTimer; private long lastModifiedTime = Long.MIN_VALUE; @@ -229,7 +228,7 @@ public class SnapshotFileCache implements Stoppable { // 2.clear the cache this.cache.clear(); - Map known = new HashMap(); + Map known = new HashMap<>(); // 3. check each of the snapshot directories FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index b950079002c..54b68d39387 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -147,8 +147,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // The map is always accessed and modified under the object lock using synchronized. 
// snapshotTable() will insert an Handler in the table. // isSnapshotDone() will remove the handler requested if the operation is finished. - private Map snapshotHandlers = - new HashMap(); + private Map snapshotHandlers = new HashMap<>(); // Restore map, with table name as key, procedure ID as value. // The map is always accessed and modified under the object lock using synchronized. @@ -157,7 +156,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // TODO: just as the Apache HBase 1.x implementation, this map would not survive master // restart/failover. This is just a stopgap implementation until implementation of taking // snapshot using Procedure-V2. - private Map restoreTableToProcIdMap = new HashMap(); + private Map restoreTableToProcIdMap = new HashMap<>(); private Path rootDir; private ExecutorService executorService; @@ -168,7 +167,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * - create snapshot * - SnapshotCleaner * */ - private KeyLocker locks = new KeyLocker(); + private KeyLocker locks = new KeyLocker<>(); @@ -209,7 +208,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @throws IOException File system exception */ private List getCompletedSnapshots(Path snapshotDir) throws IOException { - List snapshotDescs = new ArrayList(); + List snapshotDescs = new ArrayList<>(); // first create the snapshot root path and check to see if it exists FileSystem fs = master.getMasterFileSystem().getFileSystem(); if (snapshotDir == null) snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir); @@ -1032,11 +1031,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled); // Extract cleaners from conf - Set hfileCleaners = new HashSet(); + Set hfileCleaners = new HashSet<>(); String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); if (cleaners != null) Collections.addAll(hfileCleaners, cleaners); - Set logCleaners = new HashSet(); + Set logCleaners = new HashSet<>(); cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS); if (cleaners != null) Collections.addAll(logCleaners, cleaners); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 992f28e9625..123758f73c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -183,7 +183,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh monitor.rethrowException(); // extract each pair to separate lists - Set serverNames = new HashSet(); + Set serverNames = new HashSet<>(); for (Pair p : regionsAndLocations) { if (p != null && p.getFirst() != null && p.getSecond() != null) { HRegionInfo hri = p.getFirst(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java index 697286c6e54..56c0242f8b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java @@ -174,7 +174,7 @@ public class 
DefaultMobStoreCompactor extends DefaultCompactor { long bytesWrittenProgressForShippedCall = 0; // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. - List cells = new ArrayList(); + List cells = new ArrayList<>(); // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME int closeCheckSizeLimit = HStore.getCloseCheckInterval(); long lastMillis = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java index 3c6a0712c93..2456a415772 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java @@ -98,7 +98,7 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher { @Override public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId, MonitoredTask status, ThroughputController throughputController) throws IOException { - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); long cellsCount = snapshot.getCellsCount(); if (cellsCount == 0) return result; // don't flush if there are no entries @@ -179,7 +179,7 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher { byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - List cells = new ArrayList(); + List cells = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); boolean control = throughputController != null && !store.getRegionInfo().isSystemTable(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java index 8c760e66e5d..cd4c079b976 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java @@ -55,7 +55,7 @@ public class MobFile { * @throws IOException */ public StoreFileScanner getScanner() throws IOException { - List sfs = new ArrayList(); + List sfs = new ArrayList<>(); sfs.add(sf); List sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false, sf.getMaxMemstoreTS()); @@ -85,7 +85,7 @@ public class MobFile { public Cell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException { Cell result = null; StoreFileScanner scanner = null; - List sfs = new ArrayList(); + List sfs = new ArrayList<>(); sfs.add(sf); try { List sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java index fd623404899..25328b1f1d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFileCache.java @@ -102,7 +102,7 @@ public class MobFileCache { this.mobFileMaxCacheSize = conf.getInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, MobConstants.DEFAULT_MOB_FILE_CACHE_SIZE); isCacheEnabled = (mobFileMaxCacheSize > 0); - map = new ConcurrentHashMap(mobFileMaxCacheSize); + map = new ConcurrentHashMap<>(mobFileMaxCacheSize); if (isCacheEnabled) { long period = conf.getLong(MobConstants.MOB_CACHE_EVICT_PERIOD, 
MobConstants.DEFAULT_MOB_CACHE_EVICT_PERIOD); // in seconds @@ -136,12 +136,12 @@ public class MobFileCache { return; } printStatistics(); - List evictedFiles = new ArrayList(); + List evictedFiles = new ArrayList<>(); try { if (map.size() <= mobFileMaxCacheSize) { return; } - List files = new ArrayList(map.values()); + List files = new ArrayList<>(map.values()); Collections.sort(files); int start = (int) (mobFileMaxCacheSize * evictRemainRatio); if (start >= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index 8191828592a..eb75120b10a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -314,7 +314,7 @@ public final class MobUtils { // no file found return; } - List filesToClean = new ArrayList(); + List filesToClean = new ArrayList<>(); int deletedFileCount = 0; for (FileStatus file : stats) { String fileName = file.getPath().getName(); @@ -482,7 +482,7 @@ public final class MobUtils { public static Cell createMobRefCell(Cell cell, byte[] fileName, Tag tableNameTag) { // Append the tags to the KeyValue. // The key is same, the value is the filename of the mob file - List tags = new ArrayList(); + List tags = new ArrayList<>(); // Add the ref tag as the 1st one. tags.add(MobConstants.MOB_REF_TAG); // Add the tag of the source table name, this table is where this mob file is flushed @@ -832,7 +832,7 @@ public final class MobUtils { if (maxThreads == 0) { maxThreads = 1; } - final SynchronousQueue queue = new SynchronousQueue(); + final SynchronousQueue queue = new SynchronousQueue<>(); ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, queue, Threads.newDaemonThreadFactory("MobCompactor"), new RejectedExecutionHandler() { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java index b6cf814c6b9..f1dcaee0f31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java @@ -71,7 +71,7 @@ public class PartitionedMobCompactionRequest extends MobCompactionRequest { * the same partition. */ protected static class CompactionPartition { - private List files = new ArrayList(); + private List files = new ArrayList<>(); private CompactionPartitionId partitionId; // The startKey and endKey of this partition, both are inclusive. 
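The hunks above all apply the same mechanical change: wherever the declaration already states the full generic type on the left-hand side, the explicit type arguments on the constructor call are dropped in favour of the Java 7 diamond operator and the compiler infers them. A minimal, self-contained sketch of the before/after pattern (the class and member names here are illustrative only, not taken from the patch):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondExample {
      // Before: type arguments repeated on both sides of the assignment.
      private final Map<String, List<Integer>> before = new HashMap<String, List<Integer>>();

      // After: the diamond operator lets the compiler infer <String, List<Integer>>
      // from the declared type of the field.
      private final Map<String, List<Integer>> after = new HashMap<>();

      public static void main(String[] args) {
        // The same applies to locals; the inferred type argument here is String.
        List<String> names = new ArrayList<>();
        names.add("compaction");
        System.out.println(names);
      }
    }

The generated bytecode is identical either way; only the source becomes shorter and less repetitive.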
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java index 7ff7db6882f..b49df28171e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java @@ -224,7 +224,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl if (getState() != State.RUNNING) { return map; } - Map rpcJSON = new HashMap(); + Map rpcJSON = new HashMap<>(); ArrayList paramList = new ArrayList(); map.put("rpcCall", rpcJSON); rpcJSON.put("queuetimems", getRPCQueueTime()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java index 27aacebcbd3..dda77ac4ad0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java @@ -148,7 +148,7 @@ class MonitoredTaskImpl implements MonitoredTask { @Override public Map toMap() { - Map map = new HashMap(); + Map map = new HashMap<>(); map.put("description", getDescription()); map.put("status", getStatus()); map.put("state", getState()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java index 949b036401a..ff92704b961 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java @@ -72,7 +72,7 @@ public class TaskMonitor { MonitoredTask proxy = (MonitoredTask) Proxy.newProxyInstance( stat.getClass().getClassLoader(), new Class[] { MonitoredTask.class }, - new PassthroughInvocationHandler(stat)); + new PassthroughInvocationHandler<>(stat)); TaskAndWeakRefPair pair = new TaskAndWeakRefPair(stat, proxy); if (tasks.isFull()) { purgeExpiredTasks(); @@ -87,7 +87,7 @@ public class TaskMonitor { MonitoredRPCHandler proxy = (MonitoredRPCHandler) Proxy.newProxyInstance( stat.getClass().getClassLoader(), new Class[] { MonitoredRPCHandler.class }, - new PassthroughInvocationHandler(stat)); + new PassthroughInvocationHandler<>(stat)); TaskAndWeakRefPair pair = new TaskAndWeakRefPair(stat, proxy); rpcTasks.add(pair); return proxy; @@ -189,7 +189,7 @@ public class TaskMonitor { public TaskAndWeakRefPair(MonitoredTask stat, MonitoredTask proxy) { this.impl = stat; - this.weakProxy = new WeakReference(proxy); + this.weakProxy = new WeakReference<>(proxy); } public MonitoredTask get() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java index 523b05646fc..604f211417c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java @@ -47,7 +47,7 @@ class NamespaceStateManager { private volatile boolean initialized = false; public NamespaceStateManager(MasterServices masterServices) { - nsStateCache = new ConcurrentHashMap(); + nsStateCache = new ConcurrentHashMap<>(); master = masterServices; } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java index 86651e42c10..d30de6e2c92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java @@ -40,7 +40,7 @@ class NamespaceTableAndRegionInfo { public NamespaceTableAndRegionInfo(String namespace) { this.name = namespace; - this.tableAndRegionInfo = new HashMap(); + this.tableAndRegionInfo = new HashMap<>(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java index 8161ffecc52..222c9334b85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java @@ -33,8 +33,7 @@ import org.apache.zookeeper.KeeperException; public class MasterProcedureManagerHost extends ProcedureManagerHost { - private Hashtable procedureMgrMap - = new Hashtable(); + private Hashtable procedureMgrMap = new Hashtable<>(); @Override public void loadProcedures(Configuration conf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java index 0279a6038de..1d20ba5a834 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java @@ -125,9 +125,9 @@ public class Procedure implements Callable, ForeignExceptionListener { public Procedure(ProcedureCoordinator coord, ForeignExceptionDispatcher monitor, long wakeFreq, long timeout, String procName, byte[] args, List expectedMembers) { this.coord = coord; - this.acquiringMembers = new ArrayList(expectedMembers); - this.inBarrierMembers = new ArrayList(acquiringMembers.size()); - this.dataFromFinishedMembers = new HashMap(); + this.acquiringMembers = new ArrayList<>(expectedMembers); + this.inBarrierMembers = new ArrayList<>(acquiringMembers.size()); + this.dataFromFinishedMembers = new HashMap<>(); this.procName = procName; this.args = args; this.monitor = monitor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java index b7e0c04ee89..8a64cc89fe8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java @@ -113,7 +113,7 @@ public class ProcedureCoordinator { public static ThreadPoolExecutor defaultPool(String coordName, int opThreads, long keepAliveMillis) { return new ThreadPoolExecutor(1, opThreads, keepAliveMillis, TimeUnit.MILLISECONDS, - new SynchronousQueue(), + new SynchronousQueue<>(), new DaemonThreadFactory("(" + coordName + ")-proc-coordinator-pool")); } @@ -325,6 +325,6 @@ public class ProcedureCoordinator { * @return Return set of all procedure names. 
*/ public Set getProcedureNames() { - return new HashSet(procedures.keySet()); + return new HashSet<>(procedures.keySet()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java index 3ab4ac55585..f61ce1444aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java @@ -46,7 +46,7 @@ public abstract class ProcedureManagerHost { private static final Log LOG = LogFactory.getLog(ProcedureManagerHost.class); - protected Set procedures = new HashSet(); + protected Set procedures = new HashSet<>(); /** * Load system procedures. Read the class names from configuration. @@ -60,7 +60,7 @@ public abstract class ProcedureManagerHost { if (defaultProcClasses == null || defaultProcClasses.length == 0) return; - List configured = new ArrayList(); + List configured = new ArrayList<>(); for (String className : defaultProcClasses) { className = className.trim(); ClassLoader cl = this.getClass().getClassLoader(); @@ -105,7 +105,7 @@ public abstract class ProcedureManagerHost { } public Set getProcedureManagers() { - Set returnValue = new HashSet(); + Set returnValue = new HashSet<>(); for (E e: procedures) { returnValue.add(e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java index 485821e645c..baed1f3148d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java @@ -86,7 +86,7 @@ public class ProcedureMember implements Closeable { public static ThreadPoolExecutor defaultPool(String memberName, int procThreads, long keepAliveMillis) { return new ThreadPoolExecutor(1, procThreads, keepAliveMillis, TimeUnit.MILLISECONDS, - new SynchronousQueue(), + new SynchronousQueue<>(), new DaemonThreadFactory("member: '" + memberName + "' subprocedure-pool")); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java index 802a5ab2a43..7b624a5d606 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java @@ -68,7 +68,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { private MasterServices master; private ProcedureCoordinator coordinator; - private Map procMap = new HashMap(); + private Map procMap = new HashMap<>(); private boolean stopped; public MasterFlushTableProcedureManager() {}; @@ -135,7 +135,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { master.getConnection(), tableName, false); } - Set regionServers = new HashSet(regionsAndLocations.size()); + Set regionServers = new HashSet<>(regionsAndLocations.size()); for (Pair region : regionsAndLocations) { if (region != null && region.getFirst() != null && region.getSecond() != null) { HRegionInfo hri = region.getFirst(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 1aa959cad37..147c013e1bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -201,7 +201,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur private final ExecutorCompletionService taskPool; private final ThreadPoolExecutor executor; private volatile boolean stopped; - private final List> futures = new ArrayList>(); + private final List> futures = new ArrayList<>(); private final String name; FlushTableSubprocedurePool(String name, Configuration conf, Abortable abortable) { @@ -213,10 +213,10 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur int threads = conf.getInt(CONCURENT_FLUSH_TASKS_KEY, DEFAULT_CONCURRENT_FLUSH_TASKS); this.name = name; executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), new DaemonThreadFactory("rs(" + new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs(" + name + ")-flush-proc-pool")); executor.allowCoreThreadTimeOut(true); - taskPool = new ExecutorCompletionService(executor); + taskPool = new ExecutorCompletionService<>(executor); } boolean hasTasks() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java index c301759200b..8f681f0a341 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java @@ -99,7 +99,7 @@ public class ReplicationProtbufUtil { buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) { // Accumulate all the Cells seen in here. 
- List> allCells = new ArrayList>(entries.length); + List> allCells = new ArrayList<>(entries.length); int size = 0; WALProtos.FamilyScope.Builder scopeBuilder = WALProtos.FamilyScope.newBuilder(); AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder(); @@ -165,7 +165,7 @@ public class ReplicationProtbufUtil { builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString()); } - return new Pair(builder.build(), + return new Pair<>(builder.build(), getCellScanner(allCells, size)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 647a770df53..5dab2e3c738 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -80,9 +80,9 @@ public class MasterQuotaManager implements RegionStateListener { } LOG.info("Initializing quota support"); - namespaceLocks = new NamedLock(); - tableLocks = new NamedLock(); - userLocks = new NamedLock(); + namespaceLocks = new NamedLock<>(); + tableLocks = new NamedLock<>(); + userLocks = new NamedLock<>(); namespaceQuotaManager = new NamespaceAuditor(masterServices); namespaceQuotaManager.start(); @@ -460,7 +460,7 @@ public class MasterQuotaManager implements RegionStateListener { } private static class NamedLock { - private HashSet locks = new HashSet(); + private HashSet locks = new HashSet<>(); public void lock(final T name) throws InterruptedException { synchronized (locks) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index 145105254fd..ad916179920 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -65,12 +65,9 @@ public class QuotaCache implements Stoppable { // for testing purpose only, enforce the cache to be always refreshed static boolean TEST_FORCE_REFRESH = false; - private final ConcurrentHashMap namespaceQuotaCache = - new ConcurrentHashMap(); - private final ConcurrentHashMap tableQuotaCache = - new ConcurrentHashMap(); - private final ConcurrentHashMap userQuotaCache = - new ConcurrentHashMap(); + private final ConcurrentHashMap namespaceQuotaCache = new ConcurrentHashMap<>(); + private final ConcurrentHashMap tableQuotaCache = new ConcurrentHashMap<>(); + private final ConcurrentHashMap userQuotaCache = new ConcurrentHashMap<>(); private final RegionServerServices rsServices; private QuotaRefresherChore refreshChore; @@ -262,8 +259,8 @@ public class QuotaCache implements Stoppable { long evictPeriod = refreshPeriod * EVICT_PERIOD_FACTOR; // Find the quota entries to update - List gets = new ArrayList(); - List toRemove = new ArrayList(); + List gets = new ArrayList<>(); + List toRemove = new ArrayList<>(); for (Map.Entry entry: quotasMap.entrySet()) { long lastUpdate = entry.getValue().getLastUpdate(); long lastQuery = entry.getValue().getLastQuery(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index ab646b9649a..fd12fa1339a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -167,7 +167,7 @@ public 
class QuotaUtil extends QuotaTableUtil { long nowTs = EnvironmentEdgeManager.currentTime(); Result[] results = doGet(connection, gets); - Map userQuotas = new HashMap(results.length); + Map userQuotas = new HashMap<>(results.length); for (int i = 0; i < results.length; ++i) { byte[] key = gets.get(i).getRow(); assert isUserRowKey(key); @@ -232,7 +232,7 @@ public class QuotaUtil extends QuotaTableUtil { long nowTs = EnvironmentEdgeManager.currentTime(); Result[] results = doGet(connection, gets); - Map globalQuotas = new HashMap(results.length); + Map globalQuotas = new HashMap<>(results.length); for (int i = 0; i < results.length; ++i) { byte[] row = gets.get(i).getRow(); K key = kfr.getKeyFromRow(row); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java index cb00c34608d..21b4768bf33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/UserQuotaState.java @@ -123,7 +123,7 @@ public class UserQuotaState extends QuotaState { private Map setLimiter(Map limiters, final K key, final Quotas quotas) { if (limiters == null) { - limiters = new HashMap(); + limiters = new HashMap<>(); } QuotaLimiter limiter = quotas.hasThrottle() ? @@ -164,7 +164,7 @@ public class UserQuotaState extends QuotaState { if (otherMap != null) { // To Remove - Set toRemove = new HashSet(map.keySet()); + Set toRemove = new HashSet<>(map.keySet()); toRemove.removeAll(otherMap.keySet()); map.keySet().removeAll(toRemove); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java index 073562994d9..91c005025ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java @@ -70,7 +70,7 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen LOG.debug("Commit " + writers.size() + " writers, maxSeqId=" + maxSeqId + ", majorCompaction=" + majorCompaction); } - List paths = new ArrayList(); + List paths = new ArrayList<>(); for (StoreFileWriter writer : writers) { if (writer == null) { continue; @@ -87,7 +87,7 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen * Close all writers without throwing any exceptions. This is used when compaction failed usually. 
*/ public List abortWriters() { - List paths = new ArrayList(); + List paths = new ArrayList<>(); for (StoreFileWriter writer : writers()) { try { if (writer != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java index c492180b681..6c98c1d2732 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java @@ -92,10 +92,8 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction { }; // Some caches for helping performance - private final Map> argumentToClassMap = - new HashMap>(); - private final Map, Method>> methodMap = - new HashMap, Method>>(); + private final Map> argumentToClassMap = new HashMap<>(); + private final Map, Method>> methodMap = new HashMap<>(); private final float scanVirtualTimeWeight; @@ -121,7 +119,7 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction { */ public AnnotationReadingPriorityFunction(final RSRpcServices rpcServices, Class clz) { - Map qosMap = new HashMap(); + Map qosMap = new HashMap<>(); for (Method m : clz.getMethods()) { QosPriority p = m.getAnnotation(QosPriority.class); if (p != null) { @@ -137,8 +135,8 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction { this.rpcServices = rpcServices; this.annotatedQos = qosMap; if (methodMap.get("getRegion") == null) { - methodMap.put("hasRegion", new HashMap, Method>()); - methodMap.put("getRegion", new HashMap, Method>()); + methodMap.put("hasRegion", new HashMap<>()); + methodMap.put("getRegion", new HashMap<>()); } for (Class cls : knownArgumentClasses) { argumentToClassMap.put(cls.getName(), cls); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java index be2bd91041b..0b1ab18c2b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java @@ -57,7 +57,7 @@ implements RowProcessor { @Override public List getClusterIds() { - return new ArrayList(); + return new ArrayList<>(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java index 531bf66db87..9f0871253d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java @@ -48,7 +48,7 @@ public class CellSet implements NavigableSet { private final NavigableMap delegatee; /// CellSet(final CellComparator c) { - this.delegatee = new ConcurrentSkipListMap(c); + this.delegatee = new ConcurrentSkipListMap<>(c); } CellSet(final NavigableMap m) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 6870445ea3d..eba984a0133 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -285,14 
+285,14 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi // not a special compaction request, so make our own list List ret = null; if (requests == null) { - ret = selectNow ? new ArrayList(r.getStores().size()) : null; + ret = selectNow ? new ArrayList<>(r.getStores().size()) : null; for (Store s : r.getStores()) { CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user); if (selectNow) ret.add(cr); } } else { Preconditions.checkArgument(selectNow); // only system requests have selectNow == false - ret = new ArrayList(requests.size()); + ret = new ArrayList<>(requests.size()); for (Pair pair : requests) { ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 312e9fc9e5d..511bd80b8b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -322,7 +322,7 @@ public class CompactingMemStore extends AbstractMemStore { // The list of elements in pipeline + the active element + the snapshot segment // TODO : This will change when the snapshot is made of more than one element // The order is the Segment ordinal - List list = new ArrayList(order+1); + List list = new ArrayList<>(order+1); list.add(this.active.getScanner(readPt, order + 1)); for (Segment item : pipelineList) { list.add(item.getScanner(readPt, order)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index b037c892fe0..bea3e7f0bba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -248,7 +248,7 @@ public class CompactionTool extends Configured implements Tool { */ @Override public List getSplits(JobContext job) throws IOException { - List splits = new ArrayList(); + List splits = new ArrayList<>(); List files = listStatus(job); Text key = new Text(); @@ -301,7 +301,7 @@ public class CompactionTool extends Configured implements Tool { public static void createInputFile(final FileSystem fs, final Path path, final Set toCompactDirs) throws IOException { // Extract the list of store dirs - List storeDirs = new LinkedList(); + List storeDirs = new LinkedList<>(); for (Path compactDir: toCompactDirs) { if (isFamilyDir(fs, compactDir)) { storeDirs.add(compactDir); @@ -389,7 +389,7 @@ public class CompactionTool extends Configured implements Tool { @Override public int run(String[] args) throws Exception { - Set toCompactDirs = new HashSet(); + Set toCompactDirs = new HashSet<>(); boolean compactOnce = false; boolean major = false; boolean mapred = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index 30d17fb9d91..73556bda753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -65,7 +65,7 @@ public class CompositeImmutableSegment 
extends ImmutableSegment { @VisibleForTesting public List getAllSegments() { - return new LinkedList(segments); + return new LinkedList<>(segments); } public int getNumOfSegments() { @@ -150,7 +150,7 @@ public class CompositeImmutableSegment extends ImmutableSegment { */ public KeyValueScanner getScanner(long readPoint, long order) { KeyValueScanner resultScanner; - List list = new ArrayList(segments.size()); + List list = new ArrayList<>(segments.size()); for (ImmutableSegment s : segments) { list.add(s.getScanner(readPoint, order)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java index 2cea92f22de..e682597ae18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java @@ -34,8 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public class DateTieredMultiFileWriter extends AbstractMultiFileWriter { - private final NavigableMap lowerBoundary2Writer - = new TreeMap(); + private final NavigableMap lowerBoundary2Writer = new TreeMap<>(); private final boolean needEmptyFile; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index a31c2c3f2ab..4757e1d01c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -129,7 +129,7 @@ public class DefaultMemStore extends AbstractMemStore { * Scanners are ordered from 0 (oldest) to newest in increasing order. */ public List getScanners(long readPt) throws IOException { - List list = new ArrayList(2); + List list = new ArrayList<>(2); list.add(this.active.getScanner(readPt, 1)); list.add(this.snapshot.getScanner(readPt, 0)); return Collections. 
singletonList( @@ -138,7 +138,7 @@ public class DefaultMemStore extends AbstractMemStore { @Override protected List getSegments() throws IOException { - List list = new ArrayList(2); + List list = new ArrayList<>(2); list.add(this.active); list.add(this.snapshot); return list; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java index db0ad0185e6..c37ae998af7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java @@ -91,7 +91,7 @@ class DefaultStoreFileManager implements StoreFileManager { @Override public void insertNewFiles(Collection sfs) throws IOException { - ArrayList newFiles = new ArrayList(storefiles); + ArrayList newFiles = new ArrayList<>(storefiles); newFiles.addAll(sfs); sortAndSetStoreFiles(newFiles); } @@ -106,7 +106,7 @@ class DefaultStoreFileManager implements StoreFileManager { @Override public Collection clearCompactedFiles() { List result = compactedfiles; - compactedfiles = new ArrayList(); + compactedfiles = new ArrayList<>(); return result; } @@ -126,10 +126,10 @@ class DefaultStoreFileManager implements StoreFileManager { sortAndSetStoreFiles(newStoreFiles); ArrayList updatedCompactedfiles = null; if (this.compactedfiles != null) { - updatedCompactedfiles = new ArrayList(this.compactedfiles); + updatedCompactedfiles = new ArrayList<>(this.compactedfiles); updatedCompactedfiles.addAll(newCompactedfiles); } else { - updatedCompactedfiles = new ArrayList(newCompactedfiles); + updatedCompactedfiles = new ArrayList<>(newCompactedfiles); } markCompactedAway(newCompactedfiles); this.compactedfiles = sortCompactedfiles(updatedCompactedfiles); @@ -149,7 +149,7 @@ class DefaultStoreFileManager implements StoreFileManager { public void removeCompactedFiles(Collection removedCompactedfiles) throws IOException { ArrayList updatedCompactedfiles = null; if (this.compactedfiles != null) { - updatedCompactedfiles = new ArrayList(this.compactedfiles); + updatedCompactedfiles = new ArrayList<>(this.compactedfiles); updatedCompactedfiles.removeAll(removedCompactedfiles); this.compactedfiles = sortCompactedfiles(updatedCompactedfiles); } @@ -157,7 +157,7 @@ class DefaultStoreFileManager implements StoreFileManager { @Override public final Iterator getCandidateFilesForRowKeyBefore(final KeyValue targetKey) { - return new ArrayList(Lists.reverse(this.storefiles)).iterator(); + return new ArrayList<>(Lists.reverse(this.storefiles)).iterator(); } @Override @@ -204,7 +204,7 @@ class DefaultStoreFileManager implements StoreFileManager { LOG.info("Found an expired store file: " + sf.getPath() + " whose maxTimeStamp is " + fileTs + ", which is below " + maxTs); if (expiredStoreFiles == null) { - expiredStoreFiles = new ArrayList(); + expiredStoreFiles = new ArrayList<>(); } expiredStoreFiles.add(sf); } @@ -220,7 +220,7 @@ class DefaultStoreFileManager implements StoreFileManager { private List sortCompactedfiles(List storefiles) { // Sorting may not be really needed here for the compacted files? 
Collections.sort(storefiles, storeFileComparator); - return new ArrayList(storefiles); + return new ArrayList<>(storefiles); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java index 93837b71cdf..8cb3a1d4dc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java @@ -46,7 +46,7 @@ public class DefaultStoreFlusher extends StoreFlusher { @Override public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId, MonitoredTask status, ThroughputController throughputController) throws IOException { - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); int cellsCount = snapshot.getCellsCount(); if (cellsCount == 0) return result; // don't flush if there are no entries diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java index bb57869bb38..6138f5f2200 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java @@ -55,7 +55,7 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy{ } // start selection Collection stores = region.stores.values(); - Set specificStoresToFlush = new HashSet(); + Set specificStoresToFlush = new HashSet<>(); for (Store store : stores) { if (shouldFlush(store)) { specificStoresToFlush.add(store); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java index 61f5882e0d5..4318dcebc21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java @@ -39,7 +39,7 @@ public class FlushNonSloppyStoresFirstPolicy extends FlushLargeStoresPolicy { * @return the stores need to be flushed. */ @Override public Collection selectStoresToFlush() { - Collection specificStoresToFlush = new HashSet(); + Collection specificStoresToFlush = new HashSet<>(); for(Store store : regularStores) { if(shouldFlush(store) || region.shouldFlushStore(store)) { specificStoresToFlush.add(store); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index a990cebcb4f..b021430a328 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -91,7 +91,7 @@ public class HMobStore extends HStore { private volatile long mobScanCellsCount = 0; private volatile long mobScanCellsSize = 0; private HColumnDescriptor family; - private Map> map = new ConcurrentHashMap>(); + private Map> map = new ConcurrentHashMap<>(); private final IdLock keyLock = new IdLock(); // When we add a MOB reference cell to the HFile, we will add 2 tags along with it // 1. A ref tag with type TagType.MOB_REFERENCE_TAG_TYPE. 
This just denote this this cell is not @@ -109,7 +109,7 @@ public class HMobStore extends HStore { this.homePath = MobUtils.getMobHome(conf); this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(), family.getNameAsString()); - List locations = new ArrayList(2); + List locations = new ArrayList<>(2); locations.add(mobFamilyPath); TableName tn = region.getTableDesc().getTableName(); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn) @@ -341,7 +341,7 @@ public class HMobStore extends HStore { try { locations = map.get(tableNameString); if (locations == null) { - locations = new ArrayList(2); + locations = new ArrayList<>(2); TableName tn = TableName.valueOf(tableNameString); locations.add(MobUtils.getMobFamilyPath(conf, tn, family.getNameAsString())); locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index f2bc068e097..be012200c88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -250,11 +250,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // - the thread that owns the lock (allow reentrancy) // - reference count of (reentrant) locks held by the thread // - the row itself - private final ConcurrentHashMap lockedRows = - new ConcurrentHashMap(); + private final ConcurrentHashMap lockedRows = new ConcurrentHashMap<>(); - protected final Map stores = new ConcurrentSkipListMap( - Bytes.BYTES_RAWCOMPARATOR); + protected final Map stores = new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR); // TODO: account for each registered handler in HeapSize computation private Map coprocessorServiceHandlers = Maps.newHashMap(); @@ -336,7 +334,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // the maxSeqId up to which the store was flushed. And, skip the edits which // are equal to or lower than maxSeqId for each store. // The following map is populated when opening the region - Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** Saved state from replaying prepare flush cache */ private PrepareFlushResult prepareFlushResult = null; @@ -609,8 +607,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final long rowProcessorTimeout; // Last flush time for each Store. 
Useful when we are flushing for each column - private final ConcurrentMap lastStoreFlushTimeMap = - new ConcurrentHashMap(); + private final ConcurrentMap lastStoreFlushTimeMap = new ConcurrentHashMap<>(); final RegionServerServices rsServices; private RegionServerAccounting rsAccounting; @@ -642,7 +639,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private final boolean regionStatsEnabled; // Stores the replication scope of the various column families of the table // that has non-default scope - private final NavigableMap replicationScope = new TreeMap( + private final NavigableMap replicationScope = new TreeMap<>( Bytes.BYTES_COMPARATOR); /** @@ -736,7 +733,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.rsServices = rsServices; this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); setHTableSpecificConf(); - this.scannerReadPoints = new ConcurrentHashMap(); + this.scannerReadPoints = new ConcurrentHashMap<>(); this.busyWaitDuration = conf.getLong( "hbase.busy.wait.duration", DEFAULT_BUSY_WAIT_DURATION); @@ -976,8 +973,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // initialize the thread pool for opening stores in parallel. ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool("StoreOpener-" + this.getRegionInfo().getShortNameToLog()); - CompletionService completionService = - new ExecutorCompletionService(storeOpenerThreadPool); + CompletionService completionService = new ExecutorCompletionService<>(storeOpenerThreadPool); // initialize each store in parallel for (final HColumnDescriptor family : htableDescriptor.getFamilies()) { @@ -1054,12 +1050,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @return Map of StoreFiles by column family */ private NavigableMap> getStoreFiles() { - NavigableMap> allStoreFiles = - new TreeMap>(Bytes.BYTES_COMPARATOR); + NavigableMap> allStoreFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Store store: getStores()) { Collection storeFiles = store.getStorefiles(); if (storeFiles == null) continue; - List storeFileNames = new ArrayList(); + List storeFileNames = new ArrayList<>(); for (StoreFile storeFile: storeFiles) { storeFileNames.add(storeFile.getPath()); } @@ -1626,15 +1621,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } - Map> result = - new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> result = new TreeMap<>(Bytes.BYTES_COMPARATOR); if (!stores.isEmpty()) { // initialize the thread pool for closing stores in parallel. 
ThreadPoolExecutor storeCloserThreadPool = getStoreOpenAndCloseThreadPool("StoreCloserThread-" + getRegionInfo().getRegionNameAsString()); CompletionService>> completionService = - new ExecutorCompletionService>>(storeCloserThreadPool); + new ExecutorCompletionService<>(storeCloserThreadPool); // close each store in parallel for (final Store store : stores.values()) { @@ -1652,8 +1646,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi .submit(new Callable>>() { @Override public Pair> call() throws IOException { - return new Pair>( - store.getFamily().getName(), store.close()); + return new Pair<>(store.getFamily().getName(), store.close()); } }); } @@ -1663,7 +1656,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi Pair> storeFiles = future.get(); List familyFiles = result.get(storeFiles.getFirst()); if (familyFiles == null) { - familyFiles = new ArrayList(); + familyFiles = new ArrayList<>(); result.put(storeFiles.getFirst(), familyFiles); } familyFiles.addAll(storeFiles.getSecond()); @@ -2418,12 +2411,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi ((HStore) store).preFlushSeqIDEstimation()); } - TreeMap storeFlushCtxs - = new TreeMap(Bytes.BYTES_COMPARATOR); - TreeMap> committedFiles = new TreeMap>( - Bytes.BYTES_COMPARATOR); - TreeMap storeFlushableSize - = new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap storeFlushCtxs = new TreeMap<>(Bytes.BYTES_COMPARATOR); + TreeMap> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR); + TreeMap storeFlushableSize = new TreeMap<>(Bytes.BYTES_COMPARATOR); // The sequence id of this flush operation which is used to log FlushMarker and pass to // createFlushContext to use as the store file's sequence id. It can be in advance of edits // still in the memstore, edits that are in other column families yet to be flushed. 
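The HRegion hunks above show the same inference working for deeply nested type arguments, for example the completion service used when closing stores in parallel. A hedged, standalone sketch of that shape, with plain JDK types standing in for the HBase ones:

    import java.util.AbstractMap.SimpleEntry;
    import java.util.Map;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class NestedDiamondExample {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // Before the patch this would have been spelled out as
        // new ExecutorCompletionService<Map.Entry<String, Long>>(pool);
        // the diamond infers the nested type argument from the declaration.
        CompletionService<Map.Entry<String, Long>> completionService =
            new ExecutorCompletionService<>(pool);

        completionService.submit(() -> new SimpleEntry<>("store-a", 42L));
        completionService.submit(() -> new SimpleEntry<>("store-b", 7L));

        for (int i = 0; i < 2; i++) {
          Map.Entry<String, Long> result = completionService.take().get();
          System.out.println(result.getKey() + " closed, files=" + result.getValue());
        }
        pool.shutdown();
      }
    }

The longer the nested type argument, the larger the readability win from writing it only once.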
@@ -2561,7 +2551,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarker) { if (writeFlushWalMarker && wal != null && !writestate.readOnly) { FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.CANNOT_FLUSH, - getRegionInfo(), -1, new TreeMap>(Bytes.BYTES_COMPARATOR)); + getRegionInfo(), -1, new TreeMap<>(Bytes.BYTES_COMPARATOR)); try { WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, true, mvcc); @@ -2842,7 +2832,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi List cells = e.getValue(); assert cells instanceof RandomAccess; - Map kvCount = new TreeMap(Bytes.BYTES_COMPARATOR); + Map kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR); int listSize = cells.size(); for (int i=0; i < listSize; i++) { Cell cell = cells.get(i); @@ -3247,7 +3237,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // calling the pre CP hook for batch mutation if (!replay && coprocessorHost != null) { MiniBatchOperationInProgress miniBatchOp = - new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), + new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(), batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); if (coprocessorHost.preBatchMutate(miniBatchOp)) { return; @@ -3401,7 +3391,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // calling the post CP hook for batch mutation if (!replay && coprocessorHost != null) { MiniBatchOperationInProgress miniBatchOp = - new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), + new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(), batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); coprocessorHost.postBatchMutate(miniBatchOp); } @@ -3485,7 +3475,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // call the coprocessor hook to do any finalization steps // after the put is done MiniBatchOperationInProgress miniBatchOp = - new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), + new MiniBatchOperationInProgress<>(batchOp.getMutationsForCoprocs(), batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); coprocessorHost.postBatchMutateIndispensably(miniBatchOp, success); } @@ -3599,7 +3589,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi for (byte[] family : familyMap.keySet()) { if (!this.htableDescriptor.hasFamily(family)) { if (nonExistentList == null) { - nonExistentList = new ArrayList(); + nonExistentList = new ArrayList<>(); } nonExistentList.add(family); } @@ -3915,7 +3905,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private void put(final byte [] row, byte [] family, List edits) throws IOException { NavigableMap> familyMap; - familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); + familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, edits); Put p = new Put(row); @@ -4164,7 +4154,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // If this flag is set, make use of the hfile archiving by making recovered.edits a fake // column family. 
Have to fake out file type too by casting our recovered.edits as storefiles String fakeFamilyName = WALSplitter.getRegionDirRecoveredEditsDir(regiondir).getName(); - Set fakeStoreFiles = new HashSet(files.size()); + Set fakeStoreFiles = new HashSet<>(files.size()); for (Path file: files) { fakeStoreFiles.add(new StoreFile(getRegionFileSystem().getFileSystem(), file, this.conf, null, null)); @@ -4506,7 +4496,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException { long flushSeqId = flush.getFlushSequenceNumber(); - HashSet storesToFlush = new HashSet(); + HashSet storesToFlush = new HashSet<>(); for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) { byte[] family = storeFlush.getFamilyName().toByteArray(); Store store = getStore(family); @@ -5103,7 +5093,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi startRegionOperation(); // obtain region close lock try { - Map map = new HashMap(); + Map map = new HashMap<>(); synchronized (writestate) { for (Store store : getStores()) { // TODO: some stores might see new data from flush, while others do not which @@ -5280,7 +5270,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public List getStores() { - List list = new ArrayList(stores.size()); + List list = new ArrayList<>(stores.size()); list.addAll(stores.values()); return list; } @@ -5288,7 +5278,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public List getStoreFileList(final byte [][] columns) throws IllegalArgumentException { - List storeFileNames = new ArrayList(); + List storeFileNames = new ArrayList<>(); synchronized(closeLock) { for(byte[] column : columns) { Store store = this.stores.get(column); @@ -5560,8 +5550,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public Map> bulkLoadHFiles(Collection> familyPaths, boolean assignSeqId, BulkLoadListener bulkLoadListener, boolean copyFile) throws IOException { long seqId = -1; - Map> storeFiles = new TreeMap>(Bytes.BYTES_COMPARATOR); - Map storeFilesSizes = new HashMap(); + Map> storeFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR); + Map storeFilesSizes = new HashMap<>(); Preconditions.checkNotNull(familyPaths); // we need writeLock for multi-family bulk load startBulkRegionOperation(hasMultipleColumnFamilies(familyPaths)); @@ -5572,8 +5562,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // There possibly was a split that happened between when the split keys // were gathered and before the HRegion's write lock was taken. 
We need // to validate the HFile region before attempting to bulk load all of them - List ioes = new ArrayList(); - List> failures = new ArrayList>(); + List ioes = new ArrayList<>(); + List> failures = new ArrayList<>(); for (Pair p : familyPaths) { byte[] familyName = p.getFirst(); String path = p.getSecond(); @@ -5694,7 +5684,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if(storeFiles.containsKey(familyName)) { storeFiles.get(familyName).add(commitedStoreFile); } else { - List storeFileNames = new ArrayList(); + List storeFileNames = new ArrayList<>(); storeFileNames.add(commitedStoreFile); storeFiles.put(familyName, storeFileNames); } @@ -5841,11 +5831,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi throws IOException { // Here we separate all scanners into two lists - scanner that provide data required // by the filter to operate (scanners list) and all others (joinedScanners list). - List scanners = new ArrayList(scan.getFamilyMap().size()); - List joinedScanners = - new ArrayList(scan.getFamilyMap().size()); + List scanners = new ArrayList<>(scan.getFamilyMap().size()); + List joinedScanners = new ArrayList<>(scan.getFamilyMap().size()); // Store all already instantiated scanners for exception handling - List instantiatedScanners = new ArrayList(); + List instantiatedScanners = new ArrayList<>(); // handle additionalScanners if (additionalScanners != null && !additionalScanners.isEmpty()) { scanners.addAll(additionalScanners); @@ -5973,7 +5962,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // to handle scan or get operation. moreValues = nextInternal(outResults, scannerContext); } else { - List tmpList = new ArrayList(); + List tmpList = new ArrayList<>(); moreValues = nextInternal(tmpList, scannerContext); outResults.addAll(tmpList); } @@ -6837,7 +6826,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // The row key is the region name byte[] row = r.getRegionInfo().getRegionName(); final long now = EnvironmentEdgeManager.currentTime(); - final List cells = new ArrayList(2); + final List cells = new ArrayList<>(2); cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, r.getRegionInfo().toByteArray())); @@ -6930,7 +6919,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public List get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException { - List results = new ArrayList(); + List results = new ArrayList<>(); // pre-get CP hook if (withCoprocessor && (coprocessorHost != null)) { @@ -7068,7 +7057,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi boolean locked = false; List acquiredRowLocks = null; - List mutations = new ArrayList(); + List mutations = new ArrayList<>(); Collection rowsToLock = processor.getRowsToLock(); // This is assigned by mvcc either explicity in the below or in the guts of the WAL append // when it assigns the edit a sequencedid (A.K.A the mvcc write number). 
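Two details about where the diamond can and cannot be used are visible in the surrounding hunks: inference still works when the constructor takes arguments such as a Comparator, but on the Java 7/8 toolchains this patch targets the diamond is not legal on anonymous inner classes, which is presumably why anonymous Callable instances keep their explicit type argument even where the enclosing FutureTask uses <>. A small illustrative sketch of both cases:

    import java.util.Comparator;
    import java.util.TreeMap;
    import java.util.concurrent.Callable;
    import java.util.concurrent.FutureTask;

    public class DiamondLimitsExample {
      public static void main(String[] args) throws Exception {
        // Inference works even when the constructor takes arguments such as a
        // Comparator; the map's type arguments come from the declaration.
        TreeMap<String, Long> lastFlushTimes = new TreeMap<>(Comparator.reverseOrder());
        lastFlushTimes.put("cf1", 1000L);
        lastFlushTimes.put("cf2", 2000L);
        System.out.println(lastFlushTimes.firstKey()); // prints cf2

        // Before Java 9 the diamond cannot be used on an anonymous inner class,
        // so the Callable keeps its explicit <Void> while the FutureTask uses <>.
        FutureTask<Void> task = new FutureTask<>(new Callable<Void>() {
          @Override
          public Void call() {
            System.out.println("running");
            return null;
          }
        });
        task.run();
        task.get();
      }
    }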
@@ -7190,8 +7179,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } // Case with time bound - FutureTask task = - new FutureTask(new Callable() { + FutureTask task = new FutureTask<>(new Callable() { @Override public Void call() throws IOException { try { @@ -7280,7 +7268,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.writeRequestsCount.increment(); WriteEntry writeEntry = null; startRegionOperation(op); - List results = returnResults? new ArrayList(mutation.size()): null; + List results = returnResults? new ArrayList<>(mutation.size()): null; RowLock rowLock = null; MemstoreSize memstoreSize = new MemstoreSize(); try { @@ -7292,8 +7280,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return returnResults? cpResult: null; } Durability effectiveDurability = getEffectiveDurability(mutation.getDurability()); - Map> forMemStore = - new HashMap>(mutation.getFamilyCellMap().size()); + Map> forMemStore = new HashMap<>(mutation.getFamilyCellMap().size()); // Reckon Cells to apply to WAL -- in returned walEdit -- and what to add to memstore and // what to return back to the client (in 'forMemStore' and 'results' respectively). WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results); @@ -7468,7 +7455,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final List deltas, final List results) throws IOException { byte [] columnFamily = store.getFamily().getName(); - List toApply = new ArrayList(deltas.size()); + List toApply = new ArrayList<>(deltas.size()); // Get previous values for all columns in this family. List currentValues = get(mutation, store, deltas, null/*Default IsolationLevel*/, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 97cc126364d..144f43b874c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -241,7 +241,7 @@ public class HRegionFileSystem { return null; } - ArrayList storeFiles = new ArrayList(files.length); + ArrayList storeFiles = new ArrayList<>(files.length); for (FileStatus status: files) { if (validate && !StoreFileInfo.isValid(status)) { LOG.warn("Invalid StoreFile: " + status.getPath()); @@ -355,7 +355,7 @@ public class HRegionFileSystem { FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs)); if (fds == null) return null; - ArrayList families = new ArrayList(fds.length); + ArrayList families = new ArrayList<>(fds.length); for (FileStatus status: fds) { families.add(status.getPath().getName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 66d2d4d893b..cbf6561b838 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -237,7 +237,7 @@ public class HRegionServer extends HasThread implements //true - if open region action in progress //false - if close region action in progress protected final ConcurrentMap regionsInTransitionInRS = - new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + new 
ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); // Cache flushing protected MemStoreFlusher cacheFlusher; @@ -280,7 +280,7 @@ public class HRegionServer extends HasThread implements * Map of regions currently being served by this region server. Key is the * encoded region name. All access should be synchronized. */ - protected final Map onlineRegions = new ConcurrentHashMap(); + protected final Map onlineRegions = new ConcurrentHashMap<>(); /** * Map of encoded region names to the DataNode locations they should be hosted on @@ -292,7 +292,7 @@ public class HRegionServer extends HasThread implements * and here we really mean DataNode locations. */ protected final Map regionFavoredNodesMap = - new ConcurrentHashMap(); + new ConcurrentHashMap<>(); /** * Set of regions currently being in recovering state which means it can accept writes(edits from @@ -321,7 +321,7 @@ public class HRegionServer extends HasThread implements // debugging and unit tests. private volatile boolean abortRequested; - ConcurrentMap rowlocks = new ConcurrentHashMap(); + ConcurrentMap rowlocks = new ConcurrentHashMap<>(); // A state before we go into stopped state. At this stage we're closing user // space regions. @@ -1323,7 +1323,7 @@ public class HRegionServer extends HasThread implements // Wait till all regions are closed before going out. int lastCount = -1; long previousLogTime = 0; - Set closedRegions = new HashSet(); + Set closedRegions = new HashSet<>(); boolean interrupted = false; try { while (!isOnlineRegionsEmpty()) { @@ -1746,7 +1746,7 @@ public class HRegionServer extends HasThread implements createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir); // listeners the wal factory will add to wals it creates. - final List listeners = new ArrayList(); + final List listeners = new ArrayList<>(); listeners.add(new MetricsWAL()); if (this.replicationSourceHandler != null && this.replicationSourceHandler.getWALActionsListener() != null) { @@ -2657,7 +2657,7 @@ public class HRegionServer extends HasThread implements */ SortedMap getCopyOfOnlineRegionsSortedBySize() { // we'll sort the regions in reverse - SortedMap sortedRegions = new TreeMap( + SortedMap sortedRegions = new TreeMap<>( new Comparator() { @Override public int compare(Long a, Long b) { @@ -2691,7 +2691,7 @@ public class HRegionServer extends HasThread implements * the first N regions being served regardless of load.) 
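The hunks above also show the diamond working when the constructor takes arguments such as a comparator or an initial capacity; inference still comes from the declared type on the left-hand side. A sketch with placeholder key/value types (Long, String). Note that an anonymous class creation such as new Comparator<Long>() {...} cannot itself use <> before Java 9, which is why a patch targeting Java 8 has to leave those inner type arguments alone:

import java.util.Comparator;
import java.util.SortedMap;
import java.util.TreeMap;

public class DiamondWithComparator {
  public static void main(String[] args) {
    // Reverse-sorted map: type arguments inferred from the declared type even
    // though the constructor takes a comparator argument.
    SortedMap<Long, String> sortedRegions = new TreeMap<>(
        new Comparator<Long>() {
          @Override
          public int compare(Long a, Long b) {
            return -a.compareTo(b); // largest key first
          }
        });
    sortedRegions.put(10L, "region-a");
    sortedRegions.put(42L, "region-b");
    System.out.println(sortedRegions); // {42=region-b, 10=region-a}
  }
}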
*/ protected HRegionInfo[] getMostLoadedRegions() { - ArrayList regions = new ArrayList(); + ArrayList regions = new ArrayList<>(); for (Region r : onlineRegions.values()) { if (!r.isAvailable()) { continue; @@ -2903,7 +2903,7 @@ public class HRegionServer extends HasThread implements */ @Override public List getOnlineRegions(TableName tableName) { - List tableRegions = new ArrayList(); + List tableRegions = new ArrayList<>(); synchronized (this.onlineRegions) { for (Region region: this.onlineRegions.values()) { HRegionInfo regionInfo = region.getRegionInfo(); @@ -2917,7 +2917,7 @@ public class HRegionServer extends HasThread implements @Override public List getOnlineRegions() { - List allRegions = new ArrayList(); + List allRegions = new ArrayList<>(); synchronized (this.onlineRegions) { // Return a clone copy of the onlineRegions allRegions.addAll(onlineRegions.values()); @@ -2931,7 +2931,7 @@ public class HRegionServer extends HasThread implements */ @Override public Set getOnlineTables() { - Set tables = new HashSet(); + Set tables = new HashSet<>(); synchronized (this.onlineRegions) { for (Region region: this.onlineRegions.values()) { tables.add(region.getTableDesc().getTableName()); @@ -2942,7 +2942,7 @@ public class HRegionServer extends HasThread implements // used by org/apache/hbase/tmpl/regionserver/RSStatusTmpl.jamon (HBASE-4070). public String[] getRegionServerCoprocessors() { - TreeSet coprocessors = new TreeSet(); + TreeSet coprocessors = new TreeSet<>(); try { coprocessors.addAll(getWAL(null).getCoprocessorHost().getCoprocessors()); } catch (IOException exception) { @@ -3306,7 +3306,7 @@ public class HRegionServer extends HasThread implements // This map will contains all the regions that we closed for a move. // We add the time it was moved as we don't want to keep too old information protected Map movedRegions = - new ConcurrentHashMap(3000); + new ConcurrentHashMap<>(3000); // We need a timeout. If not there is a risk of giving a wrong information: this would double // the number of network calls instead of reducing them. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index b74e6353133..8a66c3a738b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -509,14 +509,13 @@ public class HStore implements Store { private List openStoreFiles(Collection files) throws IOException { if (files == null || files.isEmpty()) { - return new ArrayList(); + return new ArrayList<>(); } // initialize the thread pool for opening store files in parallel.. 
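The openStoreFiles hunk just above uses the diamond in a return statement, where the type arguments are inferred from the method's declared return type. A sketch with String standing in for the elided store-file types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class DiamondInReturn {
  static List<String> openAll(Collection<String> files) {
    if (files == null || files.isEmpty()) {
      return new ArrayList<>();    // inferred as ArrayList<String>
    }
    return new ArrayList<>(files); // copy constructor, same inference
  }

  public static void main(String[] args) {
    System.out.println(openAll(null));
    System.out.println(openAll(Arrays.asList("hfile-1", "hfile-2")));
  }
}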
ThreadPoolExecutor storeFileOpenerThreadPool = this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.getColumnFamilyName()); - CompletionService completionService = - new ExecutorCompletionService(storeFileOpenerThreadPool); + CompletionService completionService = new ExecutorCompletionService<>(storeFileOpenerThreadPool); int totalValidStoreFile = 0; for (final StoreFileInfo storeFileInfo: files) { @@ -531,7 +530,7 @@ public class HStore implements Store { totalValidStoreFile++; } - ArrayList results = new ArrayList(files.size()); + ArrayList results = new ArrayList<>(files.size()); IOException ioe = null; try { for (int i = 0; i < totalValidStoreFile; i++) { @@ -588,7 +587,7 @@ public class HStore implements Store { @Override public void refreshStoreFiles(Collection newFiles) throws IOException { - List storeFiles = new ArrayList(newFiles.size()); + List storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); } @@ -605,16 +604,15 @@ public class HStore implements Store { private void refreshStoreFilesInternal(Collection newFiles) throws IOException { StoreFileManager sfm = storeEngine.getStoreFileManager(); Collection currentFiles = sfm.getStorefiles(); - if (currentFiles == null) currentFiles = new ArrayList(0); + if (currentFiles == null) currentFiles = new ArrayList<>(0); - if (newFiles == null) newFiles = new ArrayList(0); + if (newFiles == null) newFiles = new ArrayList<>(0); - HashMap currentFilesSet = - new HashMap(currentFiles.size()); + HashMap currentFilesSet = new HashMap<>(currentFiles.size()); for (StoreFile sf : currentFiles) { currentFilesSet.put(sf.getFileInfo(), sf); } - HashSet newFilesSet = new HashSet(newFiles); + HashSet newFilesSet = new HashSet<>(newFiles); Set toBeAddedFiles = Sets.difference(newFilesSet, currentFilesSet.keySet()); Set toBeRemovedFiles = Sets.difference(currentFilesSet.keySet(), newFilesSet); @@ -626,7 +624,7 @@ public class HStore implements Store { LOG.info("Refreshing store files for region " + this.getRegionInfo().getRegionNameAsString() + " files to add: " + toBeAddedFiles + " files to remove: " + toBeRemovedFiles); - Set toBeRemovedStoreFiles = new HashSet(toBeRemovedFiles.size()); + Set toBeRemovedStoreFiles = new HashSet<>(toBeRemovedFiles.size()); for (StoreFileInfo sfi : toBeRemovedFiles) { toBeRemovedStoreFiles.add(currentFilesSet.get(sfi)); } @@ -879,7 +877,7 @@ public class HStore implements Store { // close each store file in parallel CompletionService completionService = - new ExecutorCompletionService(storeFileCloserThreadPool); + new ExecutorCompletionService<>(storeFileCloserThreadPool); for (final StoreFile f : result) { completionService.submit(new Callable() { @Override @@ -1183,7 +1181,7 @@ public class HStore implements Store { // actually more correct, since memstore get put at the end. 
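A sketch of the completion-service pattern from the HStore hunk above, with String as a placeholder for the store-file type and a lambda standing in for the anonymous Callable used in the real code; the point of interest is that the element type never appears on the right-hand side at all:

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class DiamondCompletionService {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    // Type argument inferred from the declared CompletionService<String>.
    CompletionService<String> completionService = new ExecutorCompletionService<>(pool);

    for (String name : new String[] {"file-1", "file-2", "file-3"}) {
      completionService.submit(() -> "opened " + name); // Callable<String>
    }
    for (int i = 0; i < 3; i++) {
      System.out.println(completionService.take().get());
    }
    pool.shutdown();
  }
}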
List sfScanners = StoreFileScanner.getScannersForStoreFiles(storeFilesToScan, cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore()); - List scanners = new ArrayList(sfScanners.size() + 1); + List scanners = new ArrayList<>(sfScanners.size() + 1); scanners.addAll(sfScanners); // Then the memstore scanners scanners.addAll(memStoreScanners); @@ -1206,7 +1204,7 @@ public class HStore implements Store { } List sfScanners = StoreFileScanner.getScannersForStoreFiles(files, cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore()); - List scanners = new ArrayList(sfScanners.size() + 1); + List scanners = new ArrayList<>(sfScanners.size() + 1); scanners.addAll(sfScanners); // Then the memstore scanners if (memStoreScanners != null) { @@ -1312,7 +1310,7 @@ public class HStore implements Store { // TODO: get rid of this! if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) { LOG.warn("hbase.hstore.compaction.complete is set to false"); - sfs = new ArrayList(newFiles.size()); + sfs = new ArrayList<>(newFiles.size()); final boolean evictOnClose = cacheConf != null? cacheConf.shouldEvictOnClose(): true; for (Path newFile : newFiles) { @@ -1359,7 +1357,7 @@ public class HStore implements Store { private List moveCompatedFilesIntoPlace( final CompactionRequest cr, List newFiles, User user) throws IOException { - List sfs = new ArrayList(newFiles.size()); + List sfs = new ArrayList<>(newFiles.size()); for (Path newFile : newFiles) { assert newFile != null; final StoreFile sf = moveFileIntoPlace(newFile); @@ -1389,11 +1387,11 @@ public class HStore implements Store { private void writeCompactionWalRecord(Collection filesCompacted, Collection newFiles) throws IOException { if (region.getWAL() == null) return; - List inputPaths = new ArrayList(filesCompacted.size()); + List inputPaths = new ArrayList<>(filesCompacted.size()); for (StoreFile f : filesCompacted) { inputPaths.add(f.getPath()); } - List outputPaths = new ArrayList(newFiles.size()); + List outputPaths = new ArrayList<>(newFiles.size()); for (StoreFile f : newFiles) { outputPaths.add(f.getPath()); } @@ -1489,14 +1487,14 @@ public class HStore implements Store { // being in the store's folder) or they may be missing due to a compaction. String familyName = this.getColumnFamilyName(); - List inputFiles = new ArrayList(compactionInputs.size()); + List inputFiles = new ArrayList<>(compactionInputs.size()); for (String compactionInput : compactionInputs) { Path inputPath = fs.getStoreFilePath(familyName, compactionInput); inputFiles.add(inputPath.getName()); } //some of the input files might already be deleted - List inputStoreFiles = new ArrayList(compactionInputs.size()); + List inputStoreFiles = new ArrayList<>(compactionInputs.size()); for (StoreFile sf : this.getStorefiles()) { if (inputFiles.contains(sf.getPath().getName())) { inputStoreFiles.add(sf); @@ -1504,7 +1502,7 @@ public class HStore implements Store { } // check whether we need to pick up the new files - List outputStoreFiles = new ArrayList(compactionOutputs.size()); + List outputStoreFiles = new ArrayList<>(compactionOutputs.size()); if (pickCompactionFiles) { for (StoreFile sf : this.getStorefiles()) { @@ -1738,7 +1736,7 @@ public class HStore implements Store { } if (delSfs == null || delSfs.isEmpty()) return; - Collection newFiles = new ArrayList(); // No new files. + Collection newFiles = new ArrayList<>(); // No new files. 
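Several hunks above pre-size the new list from an existing collection; the capacity argument does not affect inference. A small sketch with Integer placeholders for the scanner types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DiamondPresized {
  static List<Integer> combine(List<Integer> fileScanners, List<Integer> memScanners) {
    // Capacity hint mirrors the pattern above: all file scanners plus the memstore ones.
    List<Integer> scanners = new ArrayList<>(fileScanners.size() + memScanners.size());
    scanners.addAll(fileScanners);
    scanners.addAll(memScanners);
    return scanners;
  }

  public static void main(String[] args) {
    System.out.println(combine(Arrays.asList(1, 2, 3), Arrays.asList(4)));
  }
}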
writeCompactionWalRecord(delSfs, newFiles); replaceStoreFiles(delSfs, newFiles); completeCompaction(delSfs); @@ -2167,7 +2165,7 @@ public class HStore implements Store { this.snapshot = memstore.snapshot(); this.cacheFlushCount = snapshot.getCellsCount(); this.cacheFlushSize = snapshot.getDataSize(); - committedFiles = new ArrayList(1); + committedFiles = new ArrayList<>(1); } @Override @@ -2183,7 +2181,7 @@ public class HStore implements Store { if (this.tempFiles == null || this.tempFiles.isEmpty()) { return false; } - List storeFiles = new ArrayList(this.tempFiles.size()); + List storeFiles = new ArrayList<>(this.tempFiles.size()); for (Path storeFilePath : tempFiles) { try { StoreFile sf = HStore.this.commitFile(storeFilePath, cacheFlushSeqNum, status); @@ -2241,7 +2239,7 @@ public class HStore implements Store { @Override public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) throws IOException { - List storeFiles = new ArrayList(fileNames.size()); + List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); @@ -2273,7 +2271,7 @@ public class HStore implements Store { if (snapshot == null) { return; } - HStore.this.updateStorefiles(new ArrayList(0), snapshot.getId()); + HStore.this.updateStorefiles(new ArrayList<>(0), snapshot.getId()); } } @@ -2424,7 +2422,7 @@ public class HStore implements Store { this.getStoreEngine().getStoreFileManager().getCompactedfiles(); if (compactedfiles != null && compactedfiles.size() != 0) { // Do a copy under read lock - copyCompactedfiles = new ArrayList(compactedfiles); + copyCompactedfiles = new ArrayList<>(compactedfiles); } else { if (LOG.isTraceEnabled()) { LOG.trace("No compacted files to archive"); @@ -2449,7 +2447,7 @@ public class HStore implements Store { */ private void removeCompactedfiles(Collection compactedfiles) throws IOException { - final List filesToRemove = new ArrayList(compactedfiles.size()); + final List filesToRemove = new ArrayList<>(compactedfiles.size()); for (final StoreFile file : compactedfiles) { synchronized (file) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index c7099a5806f..e83430656dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -106,7 +106,7 @@ public class HeapMemoryManager { private MetricsHeapMemoryManager metricsHeapMemoryManager; - private List tuneObservers = new ArrayList(); + private List tuneObservers = new ArrayList<>(); public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher, Server server, RegionServerAccounting regionServerAccounting) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java index faa9b67efef..501c1e92474 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java @@ -157,7 +157,7 @@ public class ImmutableSegment extends Segment { } public List getAllSegments() { - List res = new ArrayList(Arrays.asList(this)); + List res = new 
ArrayList<>(Arrays.asList(this)); return res; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index ff76d208a61..195e8f7491d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -88,11 +88,9 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner KeyValueHeap(List scanners, KVScannerComparator comparator) throws IOException { this.comparator = comparator; - this.scannersForDelayedClose = new ArrayList( - scanners.size()); + this.scannersForDelayedClose = new ArrayList<>(scanners.size()); if (!scanners.isEmpty()) { - this.heap = new PriorityQueue(scanners.size(), - this.comparator); + this.heap = new PriorityQueue<>(scanners.size(), this.comparator); for (KeyValueScanner scanner : scanners) { if (scanner.peek() != null) { this.heap.add(scanner); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index 4af703c782e..b12b7b5fb34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -56,7 +56,7 @@ import java.io.IOException; public class Leases extends HasThread { private static final Log LOG = LogFactory.getLog(Leases.class.getName()); public static final int MIN_WAIT_TIME = 100; - private final Map leases = new ConcurrentHashMap(); + private final Map leases = new ConcurrentHashMap<>(); protected final int leaseCheckFrequency; protected volatile boolean stopRequested = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index 24f0d1ab718..9d1bc4bc628 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -54,8 +54,7 @@ public class LogRoller extends HasThread implements Closeable { private static final Log LOG = LogFactory.getLog(LogRoller.class); private final ReentrantLock rollLock = new ReentrantLock(); private final AtomicBoolean rollLog = new AtomicBoolean(false); - private final ConcurrentHashMap walNeedsRoll = - new ConcurrentHashMap(); + private final ConcurrentHashMap walNeedsRoll = new ConcurrentHashMap<>(); private final Server server; protected final RegionServerServices services; private volatile long lastrolltime = System.currentTimeMillis(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java index 8975ac789b5..a339abfbea8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java @@ -627,7 +627,7 @@ implements HeapSize, Map { */ private long addEntry(int hash, K key, V value, int bucketIndex) { Entry e = entries[bucketIndex]; - Entry newE = new Entry(hash, key, value, e, tailPtr); + Entry newE = new Entry<>(hash, key, value, e, tailPtr); entries[bucketIndex] = newE; // add as most recently used in lru if (size == 0) { @@ -810,7 +810,7 @@ implements HeapSize, Map { * @return Sorted 
list of entries */ public List> entryLruList() { - List> entryList = new ArrayList>(); + List> entryList = new ArrayList<>(); Entry entry = headPtr; while(entry != null) { entryList.add(entry); @@ -827,7 +827,7 @@ implements HeapSize, Map { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC", justification="Unused debugging function that reads only") public Set> entryTableSet() { - Set> entrySet = new HashSet>(); + Set> entrySet = new HashSet<>(); Entry [] table = entries; // FindBugs IS2_INCONSISTENT_SYNC for(int i=0;i kvs = new ArrayList(); + private List kvs = new ArrayList<>(); private boolean hasMore; private Iterator kvsIterator; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index fd77cf9fccc..174d3ca449a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -77,10 +77,8 @@ class MemStoreFlusher implements FlushRequester { private Configuration conf; // These two data members go together. Any entry in the one must have // a corresponding entry in the other. - private final BlockingQueue flushQueue = - new DelayQueue(); - private final Map regionsInQueue = - new HashMap(); + private final BlockingQueue flushQueue = new DelayQueue<>(); + private final Map regionsInQueue = new HashMap<>(); private AtomicBoolean wakeupPending = new AtomicBoolean(); private final long threadWakeFrequency; @@ -92,7 +90,7 @@ class MemStoreFlusher implements FlushRequester { private final LongAdder updatesBlockedMsHighWater = new LongAdder(); private final FlushHandler[] flushHandlers; - private List flushRequestListeners = new ArrayList(1); + private List flushRequestListeners = new ArrayList<>(1); /** * @param conf @@ -131,7 +129,7 @@ class MemStoreFlusher implements FlushRequester { */ private boolean flushOneForGlobalPressure() { SortedMap regionsBySize = server.getCopyOfOnlineRegionsSortedBySize(); - Set excludedRegions = new HashSet(); + Set excludedRegions = new HashSet<>(); double secondaryMultiplier = ServerRegionReplicaUtil.getRegionReplicaStoreFileRefreshMultiplier(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java index 30e43117e7e..4e871354621 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java @@ -65,7 +65,7 @@ public class MemStoreLABImpl implements MemStoreLAB { static final Log LOG = LogFactory.getLog(MemStoreLABImpl.class); - private AtomicReference curChunk = new AtomicReference(); + private AtomicReference curChunk = new AtomicReference<>(); // A queue of chunks from pool contained by this memstore LAB // TODO: in the future, it would be better to have List implementation instead of Queue, // as FIFO order is not so important here diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java index e2f4ebb060a..77285349a94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java @@ -49,7 +49,7 @@ public abstract class MemStoreSegmentsIterator implements Iterator { this.scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); // list of Scanners of segments in the pipeline, when compaction starts - List scanners = new ArrayList(); + List scanners = new ArrayList<>(); // create the list of scanners to traverse over all the data // no dirty reads here as these are immutable segments @@ -61,4 +61,4 @@ public abstract class MemStoreSegmentsIterator implements Iterator { } public abstract void close(); -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java index 995ea933f55..a0cd79d9a02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java @@ -131,7 +131,7 @@ MultiRowMutationProcessorResponse> { Arrays.fill(opStatus, OperationStatus.NOT_RUN); WALEdit[] walEditsFromCP = new WALEdit[mutations.size()]; if (coprocessorHost != null) { - miniBatch = new MiniBatchOperationInProgress( + miniBatch = new MiniBatchOperationInProgress<>( mutations.toArray(new Mutation[mutations.size()]), opStatus, walEditsFromCP, 0, mutations.size()); coprocessorHost.preBatchMutate(miniBatch); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java index ee4fbb93b5e..ffcc8342744 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java @@ -54,7 +54,7 @@ public class MultiVersionConcurrencyControl { // reduce the number of allocations on the write path? // This could be equal to the number of handlers + a small number. // TODO: St.Ack 20150903 Sounds good to me. - private final LinkedList writeQueue = new LinkedList(); + private final LinkedList writeQueue = new LinkedList<>(); public MultiVersionConcurrencyControl() { super(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index e6e43a4df9a..7312852e5ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -323,7 +323,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * completion of multiGets. 
*/ static class RegionScannersCloseCallBack implements RpcCallback { - private final List scanners = new ArrayList(); + private final List scanners = new ArrayList<>(); public void addScanner(RegionScanner scanner) { this.scanners.add(scanner); @@ -818,7 +818,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, case DELETE: // Collect the individual mutations and apply in a batch if (mutations == null) { - mutations = new ArrayList(actions.getActionCount()); + mutations = new ArrayList<>(actions.getActionCount()); } mutations.add(action); break; @@ -834,7 +834,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, pbResult = ProtobufUtil.toResultNoData(r); // Hard to guess the size here. Just make a rough guess. if (cellsToReturn == null) { - cellsToReturn = new ArrayList(); + cellsToReturn = new ArrayList<>(); } cellsToReturn.add(r); } else { @@ -1301,7 +1301,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @return list of blocking services and their security info classes that this server supports */ protected List getServices() { - List bssi = new ArrayList(2); + List bssi = new ArrayList<>(2); bssi.add(new BlockingServiceAndInterface( ClientService.newReflectiveBlockingService(this), ClientService.BlockingInterface.class)); @@ -1543,7 +1543,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, checkOpen(); requestCount.increment(); Map onlineRegions = regionServer.onlineRegions; - List list = new ArrayList(onlineRegions.size()); + List list = new ArrayList<>(onlineRegions.size()); for (Region region: onlineRegions.values()) { list.add(region.getRegionInfo()); } @@ -1587,7 +1587,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } else { regions = regionServer.getOnlineRegions(); } - List rLoads = new ArrayList(regions.size()); + List rLoads = new ArrayList<>(regions.size()); RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder(); RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder(); @@ -1636,7 +1636,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (request.getFamilyCount() == 0) { columnFamilies = region.getTableDesc().getFamiliesKeys(); } else { - columnFamilies = new TreeSet(Bytes.BYTES_RAWCOMPARATOR); + columnFamilies = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR); for (ByteString cf: request.getFamilyList()) { columnFamilies.add(cf.toByteArray()); } @@ -1692,8 +1692,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder(); final int regionCount = request.getOpenInfoCount(); - final Map htds = - new HashMap(regionCount); + final Map htds = new HashMap<>(regionCount); final boolean isBulkAssign = regionCount > 1; try { checkOpen(); @@ -1783,7 +1782,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } else { // Remove stale recovery region from ZK when we open region not for recovering which // could happen when turn distributedLogReplay off from on. - List tmpRegions = new ArrayList(); + List tmpRegions = new ArrayList<>(); tmpRegions.add(region.getEncodedName()); ZKSplitLog.deleteRecoveringRegionZNodes(regionServer.getZooKeeper(), tmpRegions); @@ -1914,7 +1913,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo()) ? 
region.getCoprocessorHost() : null; // do not invoke coprocessors if this is a secondary region replica - List> walEntries = new ArrayList>(); + List> walEntries = new ArrayList<>(); // Skip adding the edits to WAL if this is a secondary region replica boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()); @@ -1935,8 +1934,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, nonce, entry.getKey().getWriteTime()); } - Pair walEntry = (coprocessorHost == null) ? null : - new Pair(); + Pair walEntry = (coprocessorHost == null) ? null : new Pair<>(); List edits = WALSplitter.getMutationsFromWALEntry(entry, cells, walEntry, durability); if (coprocessorHost != null) { @@ -2132,11 +2130,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (!request.hasBulkToken()) { // Old style bulk load. This will not be supported in future releases - List> familyPaths = - new ArrayList>(request.getFamilyPathCount()); + List> familyPaths = new ArrayList<>(request.getFamilyPathCount()); for (FamilyPath familyPath : request.getFamilyPathList()) { - familyPaths.add(new Pair(familyPath.getFamily().toByteArray(), familyPath - .getPath())); + familyPaths.add(new Pair<>(familyPath.getFamily().toByteArray(), familyPath.getPath())); } if (region.getCoprocessorHost() != null) { bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths); @@ -2317,7 +2313,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private Result get(Get get, HRegion region, RegionScannersCloseCallBack closeCallBack, RpcCallContext context) throws IOException { region.prepareGet(get); - List results = new ArrayList(); + List results = new ArrayList<>(); boolean stale = region.getRegionInfo().getReplicaId() != 0; // pre-get CP hook if (region.getCoprocessorHost() != null) { @@ -2789,7 +2785,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // This is cells inside a row. Default size is 10 so if many versions or many cfs, // then we'll resize. Resizings show in profiler. Set it higher than 10. For now // arbitrary 32. TODO: keep record of general size of results being returned. 
- List values = new ArrayList(32); + List values = new ArrayList<>(32); region.startRegionOperation(Operation.SCAN); try { int i = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 649273d97a0..925e349ef28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -361,7 +361,7 @@ public class RegionCoprocessorHost // scan the table attributes for coprocessor load specifications // initialize the coprocessors - List configured = new ArrayList(); + List configured = new ArrayList<>(); for (TableCoprocessorAttribute attr: getTableCoprocessorAttrsFromSchema(conf, region.getTableDesc())) { // Load encompasses classloading and coprocessor initialization @@ -405,7 +405,7 @@ public class RegionCoprocessorHost // remain in this map classData = (ConcurrentMap)sharedDataMap.get(implClass.getName()); if (classData == null) { - classData = new ConcurrentHashMap(); + classData = new ConcurrentHashMap<>(); sharedDataMap.put(implClass.getName(), classData); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java index 39043931c43..91e28fdb833 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java @@ -45,7 +45,7 @@ public class RegionServerAccounting { // Store the edits size during replaying WAL. Use this to roll back the // global memstore size once a region opening failed. 
private final ConcurrentMap replayEditsPerRegion = - new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); private final Configuration conf; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index 82e67786cbc..ea346ea10a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -40,7 +40,7 @@ public class RegionServicesForStores { private static final int POOL_SIZE = 10; private static final ThreadPoolExecutor INMEMORY_COMPACTION_POOL = new ThreadPoolExecutor(POOL_SIZE, POOL_SIZE, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), new ThreadFactory() { @Override public Thread newThread(Runnable r) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java index 1accae187bd..b1473cb4107 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java @@ -140,8 +140,7 @@ public class SecureBulkLoadManager { List bulkLoadObservers = getBulkLoadObservers(region); if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) { - ObserverContext ctx = - new ObserverContext(getActiveUser()); + ObserverContext ctx = new ObserverContext<>(getActiveUser()); ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost() .findCoprocessorEnvironment(BulkLoadObserver.class).get(0)); @@ -162,8 +161,7 @@ public class SecureBulkLoadManager { List bulkLoadObservers = getBulkLoadObservers(region); if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) { - ObserverContext ctx = - new ObserverContext(getActiveUser()); + ObserverContext ctx = new ObserverContext<>(getActiveUser()); ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost() .findCoprocessorEnvironment(BulkLoadObserver.class).get(0)); @@ -177,9 +175,9 @@ public class SecureBulkLoadManager { public Map> secureBulkLoadHFiles(final Region region, final BulkLoadHFileRequest request) throws IOException { - final List> familyPaths = new ArrayList>(request.getFamilyPathCount()); + final List> familyPaths = new ArrayList<>(request.getFamilyPathCount()); for(ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) { - familyPaths.add(new Pair(el.getFamily().toByteArray(), el.getPath())); + familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath())); } Token userToken = null; @@ -324,7 +322,7 @@ public class SecureBulkLoadManager { this.fs = fs; this.stagingDir = stagingDir; this.conf = conf; - this.origPermissions = new HashMap(); + this.origPermissions = new HashMap<>(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 858151727aa..11d51d81ad0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -55,7 +55,7 @@ public abstract class Segment { public final static long DEEP_OVERHEAD = 
FIXED_OVERHEAD + ClassSize.ATOMIC_REFERENCE + ClassSize.CELL_SET + ClassSize.ATOMIC_LONG + ClassSize.TIMERANGE_TRACKER; - private AtomicReference cellSet= new AtomicReference(); + private AtomicReference cellSet= new AtomicReference<>(); private final CellComparator comparator; protected long minSequenceId; private MemStoreLAB memStoreLAB; @@ -115,7 +115,7 @@ public abstract class Segment { } public List getScanners(long readPoint, long order) { - List scanners = new ArrayList(1); + List scanners = new ArrayList<>(1); scanners.add(getScanner(readPoint, order)); return scanners; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java index 7e5302694df..1a8b89d75c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java @@ -108,7 +108,7 @@ public final class SegmentFactory { } private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List segments) { - List mslabs = new ArrayList(); + List mslabs = new ArrayList<>(); if (!conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) { return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index 11e46a4633b..874ca44e792 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -121,8 +121,7 @@ public class ServerNonceManager { * which is a realistic worst case. If it's much worse, we could use some sort of memory * limit and cleanup. */ - private ConcurrentHashMap nonces = - new ConcurrentHashMap(); + private ConcurrentHashMap nonces = new ConcurrentHashMap<>(); private int deleteNonceGracePeriod; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java index f19f26fe2f2..bdae05ac749 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java @@ -58,7 +58,7 @@ public class ShutdownHook { * to be executed after the last regionserver referring to a given filesystem * stops. We keep track of the # of regionserver references in values of the map. 
*/ - private final static Map fsShutdownHooks = new HashMap(); + private final static Map fsShutdownHooks = new HashMap<>(); /** * Install a shutdown hook that calls stop on the passed Stoppable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 52811f6e806..ca7dfd4cb31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -125,7 +125,7 @@ public class StoreFileScanner implements KeyValueScanner { public static List getScannersForStoreFiles(Collection files, boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop, ScanQueryMatcher matcher, long readPt, boolean isPrimaryReplica) throws IOException { - List scanners = new ArrayList(files.size()); + List scanners = new ArrayList<>(files.size()); List sorted_files = new ArrayList<>(files); Collections.sort(sorted_files, StoreFile.Comparators.SEQ_ID); for (int i = 0; i < sorted_files.size(); i++) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index abfd3fccb04..23fae6a97bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -119,7 +119,7 @@ abstract class StoreFlusher { ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); // no control on system table (such as meta, namespace, etc) flush diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 5c21a414b29..99ec30ed94a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -92,7 +92,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Collects all the KVHeap that are eagerly getting closed during the // course of a scan - protected List heapsForDelayedClose = new ArrayList(); + protected List heapsForDelayedClose = new ArrayList<>(); /** * The number of KVs seen by the scanner. 
Includes explicitly skipped KVs, but not @@ -131,9 +131,9 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Indicates whether there was flush during the course of the scan protected volatile boolean flushed = false; // generally we get one file from a flush - protected List flushedStoreFiles = new ArrayList(1); + protected List flushedStoreFiles = new ArrayList<>(1); // The current list of scanners - protected List currentScanners = new ArrayList(); + protected List currentScanners = new ArrayList<>(); // flush update lock private ReentrantLock flushLock = new ReentrantLock(); @@ -428,8 +428,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner filesOnly = false; } - List scanners = - new ArrayList(allScanners.size()); + List scanners = new ArrayList<>(allScanners.size()); // We can only exclude store files based on TTL if minVersions is set to 0. // Otherwise, we might have to return KVs that have technically expired. @@ -940,8 +939,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); CountDownLatch latch = new CountDownLatch(storeFileScannerCount); - List handlers = - new ArrayList(storeFileScannerCount); + List handlers = new ArrayList<>(storeFileScannerCount); for (KeyValueScanner scanner : scanners) { if (scanner instanceof StoreFileScanner) { ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, @@ -972,7 +970,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * @return all scanners in no particular order */ List getAllScannersForTesting() { - List allScanners = new ArrayList(); + List allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); if (current != null) allScanners.add(current); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index a2a0dcccc44..0ec41b36b56 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -79,7 +79,7 @@ public class StorefileRefresherChore extends ScheduledChore { throw new RuntimeException(REGIONSERVER_STOREFILE_REFRESH_PERIOD + " should be set smaller than half of " + TimeToLiveHFileCleaner.TTL_CONF_KEY); } - lastRefreshTimes = new HashMap(); + lastRefreshTimes = new HashMap<>(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index 2662dd12c9b..73924926b21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -143,7 +143,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { super(comparator); this.boundaries = targetBoundaries; - this.existingWriters = new ArrayList(this.boundaries.size() - 1); + this.existingWriters = new ArrayList<>(this.boundaries.size() - 1); // "major" range (range for which all files are included) boundaries, if any, // must match some target boundaries, let's find them. 
assert (majorRangeFrom == null) == (majorRangeTo == null); @@ -283,8 +283,8 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { this.left = left; this.right = right; int preallocate = Math.min(this.targetCount, 64); - this.existingWriters = new ArrayList(preallocate); - this.boundaries = new ArrayList(preallocate + 1); + this.existingWriters = new ArrayList<>(preallocate); + this.boundaries = new ArrayList<>(preallocate + 1); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index 92556346da8..1e78ab2cf56 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -84,7 +84,7 @@ public class StripeStoreEngine extends StoreEngine()) : this.stripeRequest.getRequest(); + ? new CompactionRequest(new ArrayList<>()) : this.stripeRequest.getRequest(); return this.stripeRequest != null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index 1b3c9f8cc1f..4a719f3b175 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -100,8 +100,7 @@ public class StripeStoreFileManager * same index, except the last one. Inside each list, the files are in reverse order by * seqNum. Note that the length of this is one higher than that of stripeEndKeys. */ - public ArrayList> stripeFiles - = new ArrayList>(); + public ArrayList> stripeFiles = new ArrayList<>(); /** Level 0. The files are in reverse order by seqNum. */ public ImmutableList level0Files = ImmutableList.of(); @@ -112,8 +111,8 @@ public class StripeStoreFileManager private State state = null; /** Cached file metadata (or overrides as the case may be) */ - private HashMap fileStarts = new HashMap(); - private HashMap fileEnds = new HashMap(); + private HashMap fileStarts = new HashMap<>(); + private HashMap fileEnds = new HashMap<>(); /** Normally invalid key is null, but in the map null is the result for "no key"; so use * the following constant value in these maps instead. Note that this is a constant and * we use it to compare by reference when we read from the map. */ @@ -277,7 +276,7 @@ public class StripeStoreFileManager } private byte[] getSplitPointFromAllFiles() throws IOException { - ConcatenatedLists sfs = new ConcatenatedLists(); + ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addSublist(state.level0Files); sfs.addAllSublists(state.stripeFiles); if (sfs.isEmpty()) return null; @@ -305,7 +304,7 @@ public class StripeStoreFileManager return state.allFilesCached; // We need to read all files. 
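The StripeStoreFileManager hunks around here are where the diamond pays off most, on nested generic types. A sketch with placeholder types (String keys, List<String> values) rather than the actual StoreFile/ImmutableList types used in the patch:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class DiamondNestedGenerics {
  public static void main(String[] args) {
    // Before the patch these right-hand sides repeated the full nested
    // signature, e.g. new TreeMap<String, ArrayList<String>>() and
    // new ArrayList<List<String>>().
    Map<String, List<String>> candidateStripes = new TreeMap<>();
    List<List<String>> stripeFiles = new ArrayList<>();
    Map<String, Long> fileStarts = new HashMap<>();

    List<String> stripe = new ArrayList<>();
    stripe.add("hfile-a");
    candidateStripes.put("end-row-1", stripe);
    stripeFiles.add(new ArrayList<>(stripe));
    fileStarts.put("hfile-a", 0L);
    System.out.println(candidateStripes + " " + stripeFiles + " " + fileStarts);
  }
}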
} - ConcatenatedLists result = new ConcatenatedLists(); + ConcatenatedLists result = new ConcatenatedLists<>(); result.addAllSublists(state.stripeFiles.subList(firstStripe, lastStripe + 1)); result.addSublist(state.level0Files); return result; @@ -385,9 +384,8 @@ public class StripeStoreFileManager */ private void loadUnclassifiedStoreFiles(List storeFiles) { LOG.debug("Attempting to load " + storeFiles.size() + " store files."); - TreeMap> candidateStripes = - new TreeMap>(MAP_COMPARATOR); - ArrayList level0Files = new ArrayList(); + TreeMap> candidateStripes = new TreeMap<>(MAP_COMPARATOR); + ArrayList level0Files = new ArrayList<>(); // Separate the files into tentative stripes; then validate. Currently, we rely on metadata. // If needed, we could dynamically determine the stripes in future. for (StoreFile sf : storeFiles) { @@ -405,7 +403,7 @@ public class StripeStoreFileManager } else { ArrayList stripe = candidateStripes.get(endRow); if (stripe == null) { - stripe = new ArrayList(); + stripe = new ArrayList<>(); candidateStripes.put(endRow, stripe); } insertFileIntoStripe(stripe, sf); @@ -477,9 +475,9 @@ public class StripeStoreFileManager // Copy the results into the fields. State state = new State(); state.level0Files = ImmutableList.copyOf(level0Files); - state.stripeFiles = new ArrayList>(candidateStripes.size()); + state.stripeFiles = new ArrayList<>(candidateStripes.size()); state.stripeEndRows = new byte[Math.max(0, candidateStripes.size() - 1)][]; - ArrayList newAllFiles = new ArrayList(level0Files); + ArrayList newAllFiles = new ArrayList<>(level0Files); int i = candidateStripes.size() - 1; for (Map.Entry> entry : candidateStripes.entrySet()) { state.stripeFiles.add(ImmutableList.copyOf(entry.getValue())); @@ -685,7 +683,7 @@ public class StripeStoreFileManager this.nextWasCalled = false; List src = components.get(currentComponent); if (src instanceof ImmutableList) { - src = new ArrayList(src); + src = new ArrayList<>(src); components.set(currentComponent, src); } src.remove(indexWithinComponent); @@ -711,13 +709,12 @@ public class StripeStoreFileManager private Collection compactedFiles = null; private Collection results = null; - private List l0Results = new ArrayList(); + private List l0Results = new ArrayList<>(); private final boolean isFlush; public CompactionOrFlushMergeCopy(boolean isFlush) { // Create a lazy mutable copy (other fields are so lazy they start out as nulls). - this.stripeFiles = new ArrayList>( - StripeStoreFileManager.this.state.stripeFiles); + this.stripeFiles = new ArrayList<>(StripeStoreFileManager.this.state.stripeFiles); this.isFlush = isFlush; } @@ -755,15 +752,14 @@ public class StripeStoreFileManager : ImmutableList.copyOf(this.level0Files); newState.stripeEndRows = (this.stripeEndRows == null) ? oldState.stripeEndRows : this.stripeEndRows.toArray(new byte[this.stripeEndRows.size()][]); - newState.stripeFiles = new ArrayList>(this.stripeFiles.size()); + newState.stripeFiles = new ArrayList<>(this.stripeFiles.size()); for (List newStripe : this.stripeFiles) { newState.stripeFiles.add(newStripe instanceof ImmutableList ? 
(ImmutableList)newStripe : ImmutableList.copyOf(newStripe)); } - List newAllFiles = new ArrayList(oldState.allFilesCached); - List newAllCompactedFiles = - new ArrayList(oldState.allCompactedFilesCached); + List newAllFiles = new ArrayList<>(oldState.allFilesCached); + List newAllCompactedFiles = new ArrayList<>(oldState.allCompactedFilesCached); if (!isFlush) { newAllFiles.removeAll(compactedFiles); if (delCompactedFiles) { @@ -803,7 +799,7 @@ public class StripeStoreFileManager List stripeCopy = this.stripeFiles.get(index); ArrayList result = null; if (stripeCopy instanceof ImmutableList) { - result = new ArrayList(stripeCopy); + result = new ArrayList<>(stripeCopy); this.stripeFiles.set(index, result); } else { result = (ArrayList)stripeCopy; @@ -816,7 +812,7 @@ public class StripeStoreFileManager */ private final ArrayList getLevel0Copy() { if (this.level0Files == null) { - this.level0Files = new ArrayList(StripeStoreFileManager.this.state.level0Files); + this.level0Files = new ArrayList<>(StripeStoreFileManager.this.state.level0Files); } return this.level0Files; } @@ -849,7 +845,7 @@ public class StripeStoreFileManager // Make a new candidate stripe. if (newStripes == null) { - newStripes = new TreeMap(MAP_COMPARATOR); + newStripes = new TreeMap<>(MAP_COMPARATOR); } StoreFile oldSf = newStripes.put(endRow, sf); if (oldSf != null) { @@ -893,8 +889,7 @@ public class StripeStoreFileManager TreeMap newStripes) throws IOException { // Validate that the removed and added aggregate ranges still make for a full key space. boolean hasStripes = !this.stripeFiles.isEmpty(); - this.stripeEndRows = new ArrayList( - Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); + this.stripeEndRows = new ArrayList<>(Arrays.asList(StripeStoreFileManager.this.state.stripeEndRows)); int removeFrom = 0; byte[] firstStartRow = startOf(newStripes.firstEntry().getValue()); byte[] lastEndRow = newStripes.lastKey(); @@ -917,7 +912,7 @@ public class StripeStoreFileManager int removeTo = findStripeIndexByEndRow(lastEndRow); if (removeTo < 0) throw new IOException("Compaction is trying to add a bad range."); // See if there are files in the stripes we are trying to replace. - ArrayList conflictingFiles = new ArrayList(); + ArrayList conflictingFiles = new ArrayList<>(); for (int removeIndex = removeTo; removeIndex >= removeFrom; --removeIndex) { conflictingFiles.addAll(this.stripeFiles.get(removeIndex)); } @@ -973,7 +968,7 @@ public class StripeStoreFileManager } } // Add the new stripe. 
- ArrayList tmp = new ArrayList(); + ArrayList tmp = new ArrayList<>(); tmp.add(newStripe.getValue()); stripeFiles.add(insertAt, tmp); previousEndRow = newStripe.getKey(); @@ -992,8 +987,8 @@ public class StripeStoreFileManager @Override public List getStripeBoundaries() { - if (this.state.stripeFiles.isEmpty()) return new ArrayList(); - ArrayList result = new ArrayList(this.state.stripeEndRows.length + 2); + if (this.state.stripeFiles.isEmpty()) return new ArrayList<>(); + ArrayList result = new ArrayList<>(this.state.stripeEndRows.length + 2); result.add(OPEN_KEY); Collections.addAll(result, this.state.stripeEndRows); result.add(OPEN_KEY); @@ -1033,7 +1028,7 @@ public class StripeStoreFileManager LOG.info("Found an expired store file: " + sf.getPath() + " whose maxTimeStamp is " + fileTs + ", which is below " + maxTs); if (expiredStoreFiles == null) { - expiredStoreFiles = new ArrayList(); + expiredStoreFiles = new ArrayList<>(); } expiredStoreFiles.add(sf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index 22c3ce74f12..85bae9d6e61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -57,7 +57,7 @@ public class StripeStoreFlusher extends StoreFlusher { @Override public List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum, MonitoredTask status, ThroughputController throughputController) throws IOException { - List result = new ArrayList(); + List result = new ArrayList<>(); int cellsCount = snapshot.getCellsCount(); if (cellsCount == 0) return result; // don't flush if there are no entries diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 6a3ff4afc20..3d4f9a13480 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -96,7 +96,7 @@ public class CompactionRequest implements Comparable { * @return The result (may be "this" or "other"). 
*/ public CompactionRequest combineWith(CompactionRequest other) { - this.filesToCompact = new ArrayList(other.getFiles()); + this.filesToCompact = new ArrayList<>(other.getFiles()); this.isOffPeak = other.isOffPeak; this.isMajor = other.isMajor; this.priority = other.priority; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 1fe50771105..d72529afcca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -294,7 +294,7 @@ public abstract class Compactor { if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) { // clone all StoreFiles, so we'll do the compaction on a independent copy of StoreFiles, // HFiles, and their readers - readersToClose = new ArrayList(request.getFiles().size()); + readersToClose = new ArrayList<>(request.getFiles().size()); for (StoreFile f : request.getFiles()) { StoreFile clonedStoreFile = f.cloneForReader(); // create the reader after the store file is cloned in case @@ -320,7 +320,7 @@ public abstract class Compactor { scanner = postCreateCoprocScanner(request, scanType, scanner, user); if (scanner == null) { // NULL scanner returned from coprocessor hooks means skip normal processing. - return new ArrayList(); + return new ArrayList<>(); } boolean cleanSeqId = false; if (fd.minSeqIdToKeep > 0) { @@ -413,7 +413,7 @@ public abstract class Compactor { long bytesWrittenProgressForShippedCall = 0; // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. - List cells = new ArrayList(); + List cells = new ArrayList<>(); long closeCheckSizeLimit = HStore.getCloseCheckInterval(); long lastMillis = 0; if (LOG.isDebugEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index e37a7fe8bf2..6413ee6d16b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -99,7 +99,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { @VisibleForTesting public boolean needsCompaction(final Collection storeFiles, final List filesCompacting) { - ArrayList candidates = new ArrayList(storeFiles); + ArrayList candidates = new ArrayList<>(storeFiles); try { return !selectMinorCompaction(candidates, false, true).getFiles().isEmpty(); } catch (Exception e) { @@ -222,7 +222,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { // we put them in the same window as the last file in increasing order maxTimestampSeen = Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp() == null? Long.MIN_VALUE : storeFile.getMaximumTimestamp()); - storefileMaxTimestampPairs.add(new Pair(storeFile, maxTimestampSeen)); + storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen)); } Collections.reverse(storefileMaxTimestampPairs); @@ -299,7 +299,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { file.getMinimumTimestamp() == null ? 
Long.MAX_VALUE : file.getMinimumTimestamp()); } - List boundaries = new ArrayList(); + List boundaries = new ArrayList<>(); // Add startMillis of all windows between now and min timestamp for (CompactionWindow window = getIncomingWindow(now); @@ -317,7 +317,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { */ private static List getCompactionBoundariesForMinor(CompactionWindow window, boolean singleOutput) { - List boundaries = new ArrayList(); + List boundaries = new ArrayList<>(); boundaries.add(Long.MIN_VALUE); if (!singleOutput) { boundaries.add(window.startMillis()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index 8b5aa31941f..0bd917abf7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -53,7 +53,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { @Override protected final ArrayList applyCompactionPolicy(final ArrayList candidates, final boolean mayUseOffPeak, final boolean mightBeStuck) throws IOException { - return new ArrayList(applyCompactionPolicy(candidates, mightBeStuck, + return new ArrayList<>(applyCompactionPolicy(candidates, mightBeStuck, mayUseOffPeak, comConf.getMinFilesToCompact(), comConf.getMaxFilesToCompact())); } @@ -64,8 +64,8 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { ? comConf.getCompactionRatioOffPeak() : comConf.getCompactionRatio(); // Start off choosing nothing. - List bestSelection = new ArrayList(0); - List smallest = mightBeStuck ? new ArrayList(0) : null; + List bestSelection = new ArrayList<>(0); + List smallest = mightBeStuck ? 
new ArrayList<>(0) : null; long bestSize = 0; long smallestSize = Long.MAX_VALUE; @@ -117,12 +117,12 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { if (bestSelection.isEmpty() && mightBeStuck) { LOG.debug("Exploring compaction algorithm has selected " + smallest.size() + " files of size "+ smallestSize + " because the store might be stuck"); - return new ArrayList(smallest); + return new ArrayList<>(smallest); } LOG.debug("Exploring compaction algorithm has selected " + bestSelection.size() + " files of size " + bestSize + " starting at candidate #" + bestStart + " after considering " + opts + " permutations with " + optsInRatio + " in ratio"); - return new ArrayList(bestSelection); + return new ArrayList<>(bestSelection); } private boolean isBetterSelection(List bestSelection, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java index d3398980c8e..97b83875313 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java @@ -117,7 +117,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { private Collection getExpiredStores(Collection files, Collection filesCompacting) { long currentTime = EnvironmentEdgeManager.currentTime(); - Collection expiredStores = new ArrayList(); + Collection expiredStores = new ArrayList<>(); for(StoreFile sf: files){ // Check MIN_VERSIONS is in HStore removeUnneededFiles Long maxTs = sf.getReader().getMaxTimestamp(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java index 77b0af8d48b..42b57a4891c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java @@ -43,7 +43,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { public List preSelectCompactionForCoprocessor(final Collection candidates, final List filesCompacting) { - return getCurrentEligibleFiles(new ArrayList(candidates), filesCompacting); + return getCurrentEligibleFiles(new ArrayList<>(candidates), filesCompacting); } /** @@ -56,7 +56,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { final List filesCompacting, final boolean isUserCompaction, final boolean mayUseOffPeak, final boolean forceMajor) throws IOException { // Preliminary compaction subject to filters - ArrayList candidateSelection = new ArrayList(candidateFiles); + ArrayList candidateSelection = new ArrayList<>(candidateFiles); // Stuck and not compacting enough (estimate). It is not guaranteed that we will be // able to compact more if stuck and compacting, because ratio policy excludes some // non-compacting files from consideration during compaction (see getCurrentEligibleFiles). 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index a553cf6fba6..0b66d3df974 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -68,7 +68,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { // We sincerely hope nobody is messing with us with their coprocessors. // If they do, they are very likely to shoot themselves in the foot. // We'll just exclude all the filesCompacting from the list. - ArrayList candidateFiles = new ArrayList(si.getStorefiles()); + ArrayList candidateFiles = new ArrayList<>(si.getStorefiles()); candidateFiles.removeAll(filesCompacting); return candidateFiles; } @@ -217,7 +217,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { LOG.debug("No good compaction is possible in any stripe"); return null; } - List filesToCompact = new ArrayList(bqSelection); + List filesToCompact = new ArrayList<>(bqSelection); // See if we can, and need to, split this stripe. int targetCount = 1; long targetKvs = Long.MAX_VALUE; @@ -246,7 +246,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { assert hasAllFiles; List l0Files = si.getLevel0Files(); LOG.debug("Adding " + l0Files.size() + " files to compaction to be able to drop deletes"); - ConcatenatedLists sfs = new ConcatenatedLists(); + ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addSublist(filesToCompact); sfs.addSublist(l0Files); req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries()); @@ -345,7 +345,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { } LOG.debug("Merging " + bestLength + " stripes to delete expired store files"); int endIndex = bestStart + bestLength - 1; - ConcatenatedLists sfs = new ConcatenatedLists(); + ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addAllSublists(stripes.subList(bestStart, endIndex + 1)); SplitStripeCompactionRequest result = new SplitStripeCompactionRequest(sfs, si.getStartRow(bestStart), si.getEndRow(endIndex), 1, Long.MAX_VALUE); @@ -388,7 +388,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { splitCount += 1.0; } long kvCount = (long)(getTotalKvCount(files) / splitCount); - return new Pair(kvCount, (int)Math.ceil(splitCount)); + return new Pair<>(kvCount, (int)Math.ceil(splitCount)); } /** Stripe compaction request wrapper. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java index eb6e50333cb..c3976b5a0af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanDeleteTracker.java @@ -51,6 +51,9 @@ public class ScanDeleteTracker implements DeleteTracker { protected long familyStamp = 0L; protected SortedSet familyVersionStamps = new TreeSet(); protected Cell deleteCell = null; + protected byte[] deleteBuffer = null; + protected int deleteOffset = 0; + protected int deleteLength = 0; protected byte deleteType = 0; protected long deleteTimestamp = 0L; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index aa1205ad73b..7b43c3deec8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -271,7 +271,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { private final ExecutorCompletionService taskPool; private final ThreadPoolExecutor executor; private volatile boolean stopped; - private final List> futures = new ArrayList>(); + private final List> futures = new ArrayList<>(); private final String name; SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) { @@ -283,10 +283,10 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS); this.name = name; executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), new DaemonThreadFactory("rs(" + new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs(" + name + ")-snapshot-pool")); executor.allowCoreThreadTimeOut(true); - taskPool = new ExecutorCompletionService(executor); + taskPool = new ExecutorCompletionService<>(executor); } boolean hasTasks() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java index 8867611baca..ca76ad5b23a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java @@ -76,8 +76,7 @@ public abstract class PressureAwareThroughputController extends Configured imple private volatile double maxThroughput; private volatile double maxThroughputPerOperation; - protected final ConcurrentMap activeOperations = - new ConcurrentHashMap(); + protected final ConcurrentMap activeOperations = new ConcurrentHashMap<>(); @Override public abstract void setup(final RegionServerServices server); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index bf283f82063..f32d0edc665 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -155,7 +155,7 @@ public abstract class AbstractFSWAL implements WAL { protected final Configuration conf; /** Listeners that are called on WAL events. */ - protected final List listeners = new CopyOnWriteArrayList(); + protected final List listeners = new CopyOnWriteArrayList<>(); /** * Class that does accounting of sequenceids in WAL subsystem. Holds oldest outstanding sequence @@ -413,7 +413,7 @@ public abstract class AbstractFSWAL implements WAL { .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", DEFAULT_WAL_SYNC_TIMEOUT_MS)); int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 200); // Presize our map of SyncFutures by handler objects. - this.syncFuturesByHandler = new ConcurrentHashMap(maxHandlersCount); + this.syncFuturesByHandler = new ConcurrentHashMap<>(maxHandlersCount); this.implClassName = getClass().getSimpleName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 83d93fe8be7..c3e96cfa497 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -180,7 +180,7 @@ public class AsyncFSWAL extends AbstractFSWAL { private final Deque unackedAppends = new ArrayDeque<>(); - private final SortedSet syncFutures = new TreeSet(SEQ_COMPARATOR); + private final SortedSet syncFutures = new TreeSet<>(SEQ_COMPARATOR); // the highest txid of WAL entries being processed private long highestProcessedAppendTxid; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java index a0ac8a21f67..e1f7b8f5ef0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.java @@ -156,7 +156,7 @@ public class AsyncProtobufLogWriter extends AbstractProtobufLogWriter } private long write(Consumer> action) throws IOException { - CompletableFuture future = new CompletableFuture(); + CompletableFuture future = new CompletableFuture<>(); eventLoop.execute(() -> action.accept(future)); try { return future.get().longValue(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index f5a338239c8..f0e29c127d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -226,7 +226,7 @@ public class FSHLog extends AbstractFSWAL { String hostingThreadName = Thread.currentThread().getName(); // Using BlockingWaitStrategy. Stuff that is going on here takes so long it makes no sense // spinning as other strategies do. 
- this.disruptor = new Disruptor(RingBufferTruck::new, + this.disruptor = new Disruptor<>(RingBufferTruck::new, getPreallocatedEventCount(), Threads.getNamedThreadFactory(hostingThreadName + ".append"), ProducerType.MULTI, new BlockingWaitStrategy()); // Advance the ring buffer sequence so that it starts from 1 instead of 0, @@ -489,7 +489,7 @@ public class FSHLog extends AbstractFSWAL { // the meta table when succesful (i.e. sync), closing handlers -- etc. These are usually // much fewer in number than the user-space handlers so Q-size should be user handlers plus // some space for these other handlers. Lets multiply by 3 for good-measure. - this.syncFutures = new LinkedBlockingQueue(maxHandlersCount * 3); + this.syncFutures = new LinkedBlockingQueue<>(maxHandlersCount * 3); } void offer(final long sequence, final SyncFuture[] syncFutures, final int syncFutureCount) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index d10220da139..f445059b15b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -88,7 +88,7 @@ public class ProtobufLogReader extends ReaderBase { // maximum size of the wal Trailer in bytes. If a user writes/reads a trailer with size larger // than this size, it is written/read respectively, with a WARN message in the log. protected int trailerWarnSize; - private static List writerClsNames = new ArrayList(); + private static List writerClsNames = new ArrayList<>(); static { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(AsyncProtobufLogWriter.class.getSimpleName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java index 62bc96efa8b..f9ebed7a1c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java @@ -44,7 +44,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { private static final Log LOG = LogFactory.getLog(SecureProtobufLogReader.class); private Decryptor decryptor = null; - private static List writerClsNames = new ArrayList(); + private static List writerClsNames = new ArrayList<>(); static { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); writerClsNames.add(SecureProtobufLogWriter.class.getSimpleName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 8226b82d28c..cd73eb3e21f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -154,7 +154,7 @@ class SequenceIdAccounting { */ Map resetHighest() { Map old = this.highestSequenceIds; - this.highestSequenceIds = new HashMap(); + this.highestSequenceIds = new HashMap<>(); return old; } @@ -422,7 +422,7 @@ class SequenceIdAccounting { long lowest = getLowestSequenceId(m); if (lowest != HConstants.NO_SEQNUM && lowest <= e.getValue()) { if 
(toFlush == null) { - toFlush = new ArrayList(); + toFlush = new ArrayList<>(); } toFlush.add(e.getKey()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index f79fa0101b9..7a8b3d5d096 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -130,7 +130,7 @@ public class WALEdit implements Writable, HeapSize { public WALEdit(int cellCount, boolean isReplay) { this.isReplay = isReplay; - cells = new ArrayList(cellCount); + cells = new ArrayList<>(cellCount); } /** @@ -222,7 +222,7 @@ public class WALEdit implements Writable, HeapSize { int numFamilies = in.readInt(); if (numFamilies > 0) { if (scopes == null) { - scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); } for (int i = 0; i < numFamilies; i++) { byte[] fam = Bytes.readByteArray(in); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 4dee9f16abd..f4512076164 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -111,7 +111,7 @@ public class WALEditsReplaySink { if (entriesByRegion.containsKey(loc.getRegionInfo())) { regionEntries = entriesByRegion.get(loc.getRegionInfo()); } else { - regionEntries = new ArrayList(); + regionEntries = new ArrayList<>(); entriesByRegion.put(loc.getRegionInfo(), regionEntries); } regionEntries.add(entry); @@ -160,7 +160,7 @@ public class WALEditsReplaySink { try { RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf, null); ReplayServerCallable callable = - new ReplayServerCallable(this.conn, this.rpcControllerFactory, + new ReplayServerCallable<>(this.conn, this.rpcControllerFactory, this.tableName, regionLoc, entries); factory. 
newCaller().callWithRetries(callable, this.replayTimeout); } catch (IOException ie) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java index 1045c1dbd25..86fc1fa7628 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java @@ -52,7 +52,7 @@ public class BulkLoadCellFilter { } List storesList = bld.getStoresList(); // Copy the StoreDescriptor list and update it as storesList is a unmodifiableList - List copiedStoresList = new ArrayList(storesList); + List copiedStoresList = new ArrayList<>(storesList); Iterator copiedStoresListIterator = copiedStoresList.iterator(); boolean anyStoreRemoved = false; while (copiedStoresListIterator.hasNext()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java index 1d67faade39..f858e5d9190 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ChainWALEntryFilter.java @@ -43,7 +43,7 @@ public class ChainWALEntryFilter implements WALEntryFilter { } public ChainWALEntryFilter(List filters) { - ArrayList rawFilters = new ArrayList(filters.size()); + ArrayList rawFilters = new ArrayList<>(filters.size()); // flatten the chains for (WALEntryFilter filter : filters) { if (filter instanceof ChainWALEntryFilter) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index 1a603e0c5c8..23df804ac88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -52,7 +52,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private ZooKeeperWatcher zkw = null; // FindBugs: MT_CORRECTNESS - private List regionServers = new ArrayList(0); + private List regionServers = new ArrayList<>(0); private long lastRegionServerUpdate; protected void disconnect() { @@ -151,7 +151,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint if (children == null) { return Collections.emptyList(); } - List addresses = new ArrayList(children.size()); + List addresses = new ArrayList<>(children.size()); for (String child : children) { addresses.add(ServerName.parseServerName(child)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index 9a1e2bc876a..2bedbfd17b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -80,8 +80,8 @@ public class DumpReplicationQueues extends Configured implements Tool { private long numWalsNotFound; public DumpReplicationQueues() { - deadRegionServers = new ArrayList(); - deletedQueues = new ArrayList(); + deadRegionServers = new ArrayList<>(); + 
deletedQueues = new ArrayList<>(); peersQueueSize = AtomicLongMap.create(); totalSizeOfWALs = 0; numWalsNotFound = 0; @@ -162,7 +162,7 @@ public class DumpReplicationQueues extends Configured implements Tool { public int run(String[] args) throws Exception { int errCode = -1; - LinkedList argv = new LinkedList(); + LinkedList argv = new LinkedList<>(); argv.addAll(Arrays.asList(args)); DumpOptions opts = parseOpts(argv); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index de3159fae19..ba12d53258c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -127,7 +127,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT); this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue()); + new LinkedBlockingQueue<>()); this.exec.allowCoreThreadTimeOut(true); this.abortable = ctx.getAbortable(); @@ -190,7 +190,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi */ @Override public boolean replicate(ReplicateContext replicateContext) { - CompletionService pool = new ExecutorCompletionService(this.exec); + CompletionService pool = new ExecutorCompletionService<>(this.exec); List entries = replicateContext.getEntries(); String walGroupId = replicateContext.getWalGroupId(); int sleepMultiplier = 1; @@ -212,12 +212,12 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // and number of current sinks int n = Math.min(Math.min(this.maxThreads, entries.size()/100+1), numSinks); - List> entryLists = new ArrayList>(n); + List> entryLists = new ArrayList<>(n); if (n == 1) { entryLists.add(entries); } else { for (int i=0; i(entries.size()/n+1)); + entryLists.add(new ArrayList<>(entries.size()/n+1)); } // now group by region for (Entry e : entries) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java index 35aa1fbb32f..c091b44e74f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java @@ -109,7 +109,7 @@ public class HFileReplicator { builder.setNameFormat("HFileReplicationCallable-%1$d"); this.exec = new ThreadPoolExecutor(maxCopyThreads, maxCopyThreads, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue(), builder.build()); + new LinkedBlockingQueue<>(), builder.build()); this.exec.allowCoreThreadTimeOut(true); this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, @@ -144,7 +144,7 @@ public class HFileReplicator { Table table = this.connection.getTable(tableName); // Prepare collection of queue of hfiles to be loaded(replicated) - Deque queue = new LinkedList(); + Deque queue = new LinkedList<>(); loadHFiles.prepareHFileQueue(stagingDir, table, queue, false); if (queue.isEmpty()) { @@ -221,7 +221,7 @@ 
public class HFileReplicator { } private Map copyHFilesToStagingDir() throws IOException { - Map mapOfCopiedHFiles = new HashMap(); + Map mapOfCopiedHFiles = new HashMap<>(); Pair> familyHFilePathsPair; List hfilePaths; byte[] family; @@ -270,7 +270,7 @@ public class HFileReplicator { totalNoOfHFiles = hfilePaths.size(); // For each list of hfile paths for the family - List> futures = new ArrayList>(); + List> futures = new ArrayList<>(); Callable c; Future future; int currentCopied = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index a647d03aac6..7a9ef9fe018 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -39,7 +39,7 @@ public class MetricsSource implements BaseSource { private static final Log LOG = LogFactory.getLog(MetricsSource.class); // tracks last shipped timestamp for each wal group - private Map lastTimeStamps = new HashMap(); + private Map lastTimeStamps = new HashMap<>(); private int lastQueueSize = 0; private long lastHFileRefsQueueSize = 0; private String id; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index dc4fad09707..3e0de45f724 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -238,7 +238,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60); LinkedBlockingQueue workQueue = - new LinkedBlockingQueue(maxThreads * + new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); ThreadPoolExecutor tpe = new ThreadPoolExecutor( @@ -527,8 +527,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { return; } - ArrayList> tasks - = new ArrayList>(locations.size() - 1); + ArrayList> tasks = new ArrayList<>(locations.size() - 1); // All passed entries should belong to one region because it is coming from the EntryBuffers // split per region. But the regions might split and merge (unlike log recovery case). 
@@ -543,8 +542,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { rpcControllerFactory, tableName, location, regionInfo, row, entries, sink.getSkippedEditsCounter()); Future task = pool.submit( - new RetryingRpcCallable(rpcRetryingCallerFactory, - callable, operationTimeout)); + new RetryingRpcCallable<>(rpcRetryingCallerFactory, callable, operationTimeout)); tasks.add(task); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index d3f9ba2cdb4..9cc9c7cfd7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -363,7 +363,7 @@ public class Replication extends WALActionsListener.Base implements } private void buildReplicationLoad() { - List sourceMetricsList = new ArrayList(); + List sourceMetricsList = new ArrayList<>(); // get source List sources = this.replicationManager.getSources(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index b02b2121653..ef976877ca0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -68,8 +68,7 @@ public class ReplicationLoad { this.replicationLoadSink = rLoadSinkBuild.build(); // build the SourceLoad List - Map replicationLoadSourceMap = - new HashMap(); + Map replicationLoadSourceMap = new HashMap<>(); for (MetricsSource sm : this.sourceMetricsList) { // Get the actual peer id String peerId = sm.getPeerID(); @@ -111,8 +110,7 @@ public class ReplicationLoad { replicationLoadSourceMap.put(peerId, rLoadSourceBuild.build()); } - this.replicationLoadSourceList = new ArrayList( - replicationLoadSourceMap.values()); + this.replicationLoadSourceList = new ArrayList<>(replicationLoadSourceMap.values()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 71f9f3ddb80..a3d6d133c79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -152,8 +152,7 @@ public class ReplicationSink { long totalReplicated = 0; // Map of table => list of Rows, grouped by cluster id, we only want to flushCommits once per // invocation of this method per table and cluster id. - Map, List>> rowMap = - new TreeMap, List>>(); + Map, List>> rowMap = new TreeMap<>(); // Map of table name Vs list of pair of family and list of hfile paths from its namespace Map>>> bulkLoadHFileMap = null; @@ -173,7 +172,7 @@ public class ReplicationSink { // Handle bulk load hfiles replication if (CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) { if (bulkLoadHFileMap == null) { - bulkLoadHFileMap = new HashMap>>>(); + bulkLoadHFileMap = new HashMap<>(); } buildBulkLoadHFileMap(bulkLoadHFileMap, table, cell); } else { @@ -184,7 +183,7 @@ public class ReplicationSink { CellUtil.isDelete(cell) ? 
new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); - List clusterIds = new ArrayList(entry.getKey().getClusterIdsList().size()); + List clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size()); for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) { clusterIds.add(toUUID(clusterId)); } @@ -275,20 +274,18 @@ public class ReplicationSink { private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS, List>> familyHFilePathsList) { - List hfilePaths = new ArrayList(1); + List hfilePaths = new ArrayList<>(1); hfilePaths.add(pathToHfileFromNS); - familyHFilePathsList.add(new Pair>(family, hfilePaths)); + familyHFilePathsList.add(new Pair<>(family, hfilePaths)); } private void addNewTableEntryInMap( final Map>>> bulkLoadHFileMap, byte[] family, String pathToHfileFromNS, String tableName) { - List hfilePaths = new ArrayList(1); + List hfilePaths = new ArrayList<>(1); hfilePaths.add(pathToHfileFromNS); - Pair> newFamilyHFilePathsPair = - new Pair>(family, hfilePaths); - List>> newFamilyHFilePathsList = - new ArrayList>>(); + Pair> newFamilyHFilePathsPair = new Pair<>(family, hfilePaths); + List>> newFamilyHFilePathsList = new ArrayList<>(); newFamilyHFilePathsList.add(newFamilyHFilePathsPair); bulkLoadHFileMap.put(tableName, newFamilyHFilePathsList); } @@ -327,12 +324,12 @@ public class ReplicationSink { private List addToHashMultiMap(Map>> map, K1 key1, K2 key2, V value) { Map> innerMap = map.get(key1); if (innerMap == null) { - innerMap = new HashMap>(); + innerMap = new HashMap<>(); map.put(key1, innerMap); } List values = innerMap.get(key2); if (values == null) { - values = new ArrayList(); + values = new ArrayList<>(); innerMap.put(key2, values); } values.add(value); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index d3f6d3585c5..72da9bd4cb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -94,8 +94,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf private static final Log LOG = LogFactory.getLog(ReplicationSource.class); // Queues of logs to process, entry in format of walGroupId->queue, // each presents a queue for one wal group - private Map> queues = - new HashMap>(); + private Map> queues = new HashMap<>(); // per group queue size, keep no more than this number of logs in each wal group private int queueSizePerGroup; private ReplicationQueues replicationQueues; @@ -140,8 +139,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf private ReplicationThrottler throttler; private long defaultBandwidth; private long currentBandwidth; - private ConcurrentHashMap workerThreads = - new ConcurrentHashMap(); + private ConcurrentHashMap workerThreads = new ConcurrentHashMap<>(); private AtomicLong totalBufferUsed; @@ -209,7 +207,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf String logPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(log.getName()); PriorityBlockingQueue queue = queues.get(logPrefix); if (queue == null) { - queue = new PriorityBlockingQueue(queueSizePerGroup, new LogsComparator()); + 
queue = new PriorityBlockingQueue<>(queueSizePerGroup, new LogsComparator()); queues.put(logPrefix, queue); if (this.sourceRunning) { // new wal group observed after source startup, start a new worker thread to track it diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 5cb7d75d4fa..a38e264e641 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -147,14 +147,14 @@ public class ReplicationSourceManager implements ReplicationListener { final Path oldLogDir, final UUID clusterId) throws IOException { //CopyOnWriteArrayList is thread-safe. //Generally, reading is more than modifying. - this.sources = new CopyOnWriteArrayList(); + this.sources = new CopyOnWriteArrayList<>(); this.replicationQueues = replicationQueues; this.replicationPeers = replicationPeers; this.replicationTracker = replicationTracker; this.server = server; - this.walsById = new HashMap>>(); - this.walsByIdRecoveredQueues = new ConcurrentHashMap>>(); - this.oldsources = new CopyOnWriteArrayList(); + this.walsById = new HashMap<>(); + this.walsByIdRecoveredQueues = new ConcurrentHashMap<>(); + this.oldsources = new CopyOnWriteArrayList<>(); this.conf = conf; this.fs = fs; this.logDir = logDir; @@ -170,8 +170,7 @@ public class ReplicationSourceManager implements ReplicationListener { // use a short 100ms sleep since this could be done inline with a RS startup // even if we fail, other region servers can take care of it this.executor = new ThreadPoolExecutor(nbWorkers, nbWorkers, - 100, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue()); + 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); tfb.setNameFormat("ReplicationExecutor-%d"); tfb.setDaemon(true); @@ -277,7 +276,7 @@ public class ReplicationSourceManager implements ReplicationListener { this.replicationPeers, server, id, this.clusterId, peerConfig, peer); synchronized (this.walsById) { this.sources.add(src); - Map> walsByGroup = new HashMap>(); + Map> walsByGroup = new HashMap<>(); this.walsById.put(id, walsByGroup); // Add the latest wal to that source's queue synchronized (latestPaths) { @@ -285,7 +284,7 @@ public class ReplicationSourceManager implements ReplicationListener { for (Path logPath : latestPaths) { String name = logPath.getName(); String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(name); - SortedSet logs = new TreeSet(); + SortedSet logs = new TreeSet<>(); logs.add(name); walsByGroup.put(walPrefix, logs); try { @@ -423,7 +422,7 @@ public class ReplicationSourceManager implements ReplicationListener { if (!existingPrefix) { // The new log belongs to a new group, add it into this peer LOG.debug("Start tracking logs for wal group " + logPrefix + " for peer " + peerId); - SortedSet wals = new TreeSet(); + SortedSet wals = new TreeSet<>(); wals.add(logName); walsByPrefix.put(logPrefix, wals); } @@ -570,8 +569,7 @@ public class ReplicationSourceManager implements ReplicationListener { + sources.size() + " and another " + oldsources.size() + " that were recovered"); String terminateMessage = "Replication stream was removed by a user"; - List oldSourcesToDelete = - new ArrayList(); + List oldSourcesToDelete = new ArrayList<>(); // 
synchronized on oldsources to avoid adding recovered source for the to-be-removed peer // see NodeFailoverWorker.run synchronized (oldsources) { @@ -589,7 +587,7 @@ public class ReplicationSourceManager implements ReplicationListener { LOG.info("Number of deleted recovered sources for " + id + ": " + oldSourcesToDelete.size()); // Now look for the one on this cluster - List srcToRemove = new ArrayList(); + List srcToRemove = new ArrayList<>(); // synchronize on replicationPeers to avoid adding source for the to-be-removed peer synchronized (this.replicationPeers) { for (ReplicationSourceInterface src : this.sources) { @@ -735,13 +733,13 @@ public class ReplicationSourceManager implements ReplicationListener { continue; } // track sources in walsByIdRecoveredQueues - Map> walsByGroup = new HashMap>(); + Map> walsByGroup = new HashMap<>(); walsByIdRecoveredQueues.put(peerId, walsByGroup); for (String wal : walsSet) { String walPrefix = AbstractFSWALProvider.getWALPrefixFromWALName(wal); SortedSet wals = walsByGroup.get(walPrefix); if (wals == null) { - wals = new TreeSet(); + wals = new TreeSet<>(); walsByGroup.put(walPrefix, wals); } wals.add(wal); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index f06330c7040..c1aad936b51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -286,7 +286,7 @@ public class AccessControlLists { ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, ACL_KEY_DELIMITER, columnName)))); - Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); + Set qualifierSet = new TreeSet<>(Bytes.BYTES_COMPARATOR); ResultScanner scanner = null; try { scanner = table.getScanner(scan); @@ -384,8 +384,7 @@ public class AccessControlLists { throw new IOException("Can only load permissions from "+ACL_TABLE_NAME); } - Map> allPerms = - new TreeMap>(Bytes.BYTES_RAWCOMPARATOR); + Map> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); // do a full scan of _acl_ table @@ -397,7 +396,7 @@ public class AccessControlLists { iScanner = aclRegion.getScanner(scan); while (true) { - List row = new ArrayList(); + List row = new ArrayList<>(); boolean hasNext = iScanner.next(row); ListMultimap perms = ArrayListMultimap.create(); @@ -436,8 +435,7 @@ public class AccessControlLists { */ static Map> loadAll( Configuration conf) throws IOException { - Map> allPerms = - new TreeMap>(Bytes.BYTES_RAWCOMPARATOR); + Map> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); // do a full scan of _acl_, filtering on only first table region rows @@ -530,7 +528,7 @@ public class AccessControlLists { ListMultimap allPerms = getPermissions( conf, entryName, null); - List perms = new ArrayList(); + List perms = new ArrayList<>(); if(isNamespaceEntry(entryName)) { // Namespace for (Map.Entry entry : allPerms.entries()) { @@ -591,8 +589,7 @@ public class AccessControlLists { //Handle namespace entry if(isNamespaceEntry(entryName)) { - return new Pair(username, - new TablePermission(Bytes.toString(fromNamespaceEntry(entryName)), value)); + return new Pair<>(username, new TablePermission(Bytes.toString(fromNamespaceEntry(entryName)), value)); } //Handle table and global entry @@ -612,8 +609,7 @@ public class AccessControlLists { } } - return new Pair(username, - new TablePermission(TableName.valueOf(entryName), 
permFamily, permQualifier, value)); + return new Pair<>(username, new TablePermission(TableName.valueOf(entryName), permFamily, permQualifier, value)); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 814f2092c7e..64ac90032f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -259,8 +259,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS */ void updateACL(RegionCoprocessorEnvironment e, final Map> familyMap) { - Set entries = - new TreeSet(Bytes.BYTES_RAWCOMPARATOR); + Set entries = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR); for (Map.Entry> f : familyMap.entrySet()) { List cells = f.getValue(); for (Cell cell: cells) { @@ -793,7 +792,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS // This Map is identical to familyMap. The key is a BR rather than byte[]. // It will be easy to do gets over this new Map as we can create get keys over the Cell cf by // new SimpleByteRange(cell.familyArray, cell.familyOffset, cell.familyLen) - Map> familyMap1 = new HashMap>(); + Map> familyMap1 = new HashMap<>(); for (Entry> entry : familyMap.entrySet()) { if (entry.getValue() instanceof List) { familyMap1.put(new SimpleMutableByteRange(entry.getKey()), (List) entry.getValue()); @@ -882,7 +881,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS List newCells = Lists.newArrayList(); for (Cell cell: e.getValue()) { // Prepend the supplied perms in a new ACL tag to an update list of tags for the cell - List tags = new ArrayList(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(AccessControlLists.ACL_TAG_TYPE, perms)); Iterator tagIterator = CellUtil.tagsIterator(cell); while (tagIterator.hasNext()) { @@ -990,7 +989,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS public void preCreateTable(ObserverContext c, HTableDescriptor desc, HRegionInfo[] regions) throws IOException { Set families = desc.getFamiliesKeys(); - Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] family: families) { familyMap.put(family, null); } @@ -2407,8 +2406,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS tperm.getTableName())); } - Map> familyMap = - new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); if (tperm.getFamily() != null) { if (tperm.getQualifier() != null) { Set qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR); @@ -2515,7 +2513,7 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS return null; } - Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, qualifier != null ? 
ImmutableSet.of(qualifier) : null); return familyMap; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index eae9e4e47cd..0d539ced5a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -103,10 +103,10 @@ public class TableAuthManager implements Closeable { private volatile PermissionCache globalCache; private ConcurrentSkipListMap> tableCache = - new ConcurrentSkipListMap>(); + new ConcurrentSkipListMap<>(); private ConcurrentSkipListMap> nsCache = - new ConcurrentSkipListMap>(); + new ConcurrentSkipListMap<>(); private Configuration conf; private ZKPermissionWatcher zkperms; @@ -143,7 +143,7 @@ public class TableAuthManager implements Closeable { throw new IOException("Unable to obtain the current user, " + "authorization checks for internal operations will not work correctly!"); } - PermissionCache newCache = new PermissionCache(); + PermissionCache newCache = new PermissionCache<>(); String currentUser = user.getShortName(); // the system user is always included @@ -239,7 +239,7 @@ public class TableAuthManager implements Closeable { */ private void updateTableCache(TableName table, ListMultimap tablePerms) { - PermissionCache newTablePerms = new PermissionCache(); + PermissionCache newTablePerms = new PermissionCache<>(); for (Map.Entry entry : tablePerms.entries()) { if (AuthUtil.isGroupPrincipal(entry.getKey())) { @@ -263,7 +263,7 @@ public class TableAuthManager implements Closeable { */ private void updateNsCache(String namespace, ListMultimap tablePerms) { - PermissionCache newTablePerms = new PermissionCache(); + PermissionCache newTablePerms = new PermissionCache<>(); for (Map.Entry entry : tablePerms.entries()) { if (AuthUtil.isGroupPrincipal(entry.getKey())) { @@ -734,8 +734,7 @@ public class TableAuthManager implements Closeable { return mtime.get(); } - private static Map managerMap = - new HashMap(); + private static Map managerMap = new HashMap<>(); private static Map refCount = new HashMap<>(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index f21e8772c15..3324b90a3d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -58,8 +58,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable TableAuthManager authManager; String aclZNode; CountDownLatch initialized = new CountDownLatch(1); - AtomicReference> nodes = - new AtomicReference>(null); + AtomicReference> nodes = new AtomicReference<>(null); ExecutorService executor; public ZKPermissionWatcher(ZooKeeperWatcher watcher, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 26448b1a875..a569cf3073c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -70,8 
+70,7 @@ public class AuthenticationTokenSecretManager private LeaderElector leaderElector; private ZKClusterId clusterId; - private Map allKeys = - new ConcurrentHashMap(); + private Map allKeys = new ConcurrentHashMap<>(); private AuthenticationKey currentKey; private int idSeq; @@ -181,8 +180,7 @@ public class AuthenticationTokenSecretManager public Token generateToken(String username) { AuthenticationTokenIdentifier ident = new AuthenticationTokenIdentifier(username); - Token token = - new Token(ident, this); + Token token = new Token<>(ident, this); if (clusterId.hasId()) { token.setService(new Text(clusterId.getId())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index f767ed36654..1d424509f77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -314,7 +314,7 @@ public class TokenUtil { * @return the Token instance */ public static Token toToken(AuthenticationProtos.Token proto) { - return new Token( + return new Token<>( proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, proto.hasPassword() ? proto.getPassword().toByteArray() : null, AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 9abb3a25fa5..d4a5627bec6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -151,10 +151,10 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService protected List> getExistingLabelsWithAuths() throws IOException { Scan scan = new Scan(); RegionScanner scanner = labelsRegion.getScanner(scan); - List> existingLabels = new ArrayList>(); + List> existingLabels = new ArrayList<>(); try { while (true) { - List cells = new ArrayList(); + List cells = new ArrayList<>(); scanner.next(cells); if (cells.isEmpty()) { break; @@ -169,8 +169,8 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService protected Pair, Map>> extractLabelsAndAuths( List> labelDetails) { - Map labels = new HashMap(); - Map> userAuths = new HashMap>(); + Map labels = new HashMap<>(); + Map> userAuths = new HashMap<>(); for (List cells : labelDetails) { for (Cell cell : cells) { if (CellUtil.matchingQualifier(cell, LABEL_QUALIFIER)) { @@ -183,14 +183,14 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService cell.getQualifierLength()); List auths = userAuths.get(user); if (auths == null) { - auths = new ArrayList(); + auths = new ArrayList<>(); userAuths.put(user, auths); } auths.add(CellUtil.getRowAsInt(cell)); } } } - return new Pair, Map>>(labels, userAuths); + return new Pair<>(labels, userAuths); } protected void addSystemLabel(Region region, Map labels, @@ -207,7 +207,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService public OperationStatus[] addLabels(List labels) throws IOException { assert labelsRegion != null; OperationStatus[] finalOpStatus = new OperationStatus[labels.size()]; - List puts = new 
ArrayList(labels.size()); + List puts = new ArrayList<>(labels.size()); int i = 0; for (byte[] label : labels) { String labelStr = Bytes.toString(label); @@ -235,7 +235,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService public OperationStatus[] setAuths(byte[] user, List authLabels) throws IOException { assert labelsRegion != null; OperationStatus[] finalOpStatus = new OperationStatus[authLabels.size()]; - List puts = new ArrayList(authLabels.size()); + List puts = new ArrayList<>(authLabels.size()); int i = 0; for (byte[] auth : authLabels) { String authStr = Bytes.toString(auth); @@ -269,7 +269,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService else { currentAuths = this.getUserAuths(user, true); } - List deletes = new ArrayList(authLabels.size()); + List deletes = new ArrayList<>(authLabels.size()); int i = 0; for (byte[] authLabel : authLabels) { String authLabelStr = Bytes.toString(authLabel); @@ -334,10 +334,10 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion, new Authorizations(SYSTEM_LABEL)); s.setFilter(filter); - ArrayList auths = new ArrayList(); + ArrayList auths = new ArrayList<>(); RegionScanner scanner = this.labelsRegion.getScanner(s); try { - List results = new ArrayList(1); + List results = new ArrayList<>(1); while (true) { scanner.next(results); if (results.isEmpty()) break; @@ -371,10 +371,10 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService Filter filter = VisibilityUtils.createVisibilityLabelFilter(this.labelsRegion, new Authorizations(SYSTEM_LABEL)); s.setFilter(filter); - Set auths = new HashSet(); + Set auths = new HashSet<>(); RegionScanner scanner = this.labelsRegion.getScanner(s); try { - List results = new ArrayList(1); + List results = new ArrayList<>(1); while (true) { scanner.next(results); if (results.isEmpty()) break; @@ -389,7 +389,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } finally { scanner.close(); } - return new ArrayList(auths); + return new ArrayList<>(auths); } @Override @@ -401,7 +401,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService labels.remove(SYSTEM_LABEL); if (regex != null) { Pattern pattern = Pattern.compile(regex); - ArrayList matchedLabels = new ArrayList(); + ArrayList matchedLabels = new ArrayList<>(); for (String label : labels.keySet()) { if (pattern.matcher(label).matches()) { matchedLabels.add(label); @@ -409,13 +409,13 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } return matchedLabels; } - return new ArrayList(labels.keySet()); + return new ArrayList<>(labels.keySet()); } @Override public List createVisibilityExpTags(String visExpression, boolean withSerializationFormat, boolean checkAuths) throws IOException { - Set auths = new HashSet(); + Set auths = new HashSet<>(); if (checkAuths) { User user = VisibilityUtils.getActiveUser(); auths.addAll(this.labelsCache.getUserAuthsAsOrdinals(user.getShortName())); @@ -461,7 +461,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService try { // null authorizations to be handled inside SLG impl. authLabels = scanLabelGenerator.getLabels(VisibilityUtils.getActiveUser(), authorizations); - authLabels = (authLabels == null) ? new ArrayList() : authLabels; + authLabels = (authLabels == null) ? 
new ArrayList<>() : authLabels; authorizations = new Authorizations(authLabels); } catch (Throwable t) { LOG.error(t); @@ -605,7 +605,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService } private static List> sortTagsBasedOnOrdinal(List tags) throws IOException { - List> fullTagsList = new ArrayList>(); + List> fullTagsList = new ArrayList<>(); for (Tag tag : tags) { if (tag.getType() == VISIBILITY_TAG_TYPE) { getSortedTagOrdinals(fullTagsList, tag); @@ -616,7 +616,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService private static void getSortedTagOrdinals(List> fullTagsList, Tag tag) throws IOException { - List tagsOrdinalInSortedOrder = new ArrayList(); + List tagsOrdinalInSortedOrder = new ArrayList<>(); int offset = tag.getValueOffset(); int endOffset = offset + tag.getValueLength(); while (offset < endOffset) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java index 2c7d2537f80..2126ee721b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefinedSetFilterScanLabelGenerator.java @@ -62,18 +62,18 @@ public class DefinedSetFilterScanLabelGenerator implements ScanLabelGenerator { if (authorizations != null) { List labels = authorizations.getLabels(); String userName = user.getShortName(); - Set auths = new HashSet(); + Set auths = new HashSet<>(); auths.addAll(this.labelsCache.getUserAuths(userName)); auths.addAll(this.labelsCache.getGroupAuths(user.getGroupNames())); - return dropLabelsNotInUserAuths(labels, new ArrayList(auths), userName); + return dropLabelsNotInUserAuths(labels, new ArrayList<>(auths), userName); } return null; } private List dropLabelsNotInUserAuths(List labels, List auths, String userName) { - List droppedLabels = new ArrayList(); - List passedLabels = new ArrayList(labels.size()); + List droppedLabels = new ArrayList<>(); + List passedLabels = new ArrayList<>(labels.size()); for (String label : labels) { if (auths.contains(label)) { passedLabels.add(label); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java index dd0497c0aa2..177f4d2ecf6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/EnforcingScanLabelGenerator.java @@ -62,10 +62,10 @@ public class EnforcingScanLabelGenerator implements ScanLabelGenerator { if (authorizations != null) { LOG.warn("Dropping authorizations requested by user " + userName + ": " + authorizations); } - Set auths = new HashSet(); + Set auths = new HashSet<>(); auths.addAll(this.labelsCache.getUserAuths(userName)); auths.addAll(this.labelsCache.getGroupAuths(user.getGroupNames())); - return new ArrayList(auths); + return new ArrayList<>(auths); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java index db3caffb0bc..2b9a56ee426 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java @@ -40,7 +40,7 @@ public class ExpressionParser { private static final char DOUBLE_QUOTES = '"'; public ExpressionNode parse(String expS) throws ParseException { expS = expS.trim(); - Stack expStack = new Stack(); + Stack expStack = new Stack<>(); int index = 0; byte[] exp = Bytes.toBytes(expS); int endPos = exp.length; @@ -68,7 +68,7 @@ public class ExpressionParser { // We have to rewrite the expression within double quotes as incase of expressions // with escape characters we may have to avoid them as the original expression did // not have them - List list = new ArrayList(); + List list = new ArrayList<>(); while (index < endPos && !endDoubleQuotesFound(exp[index])) { if (exp[index] == '\\') { index++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java index 1f90682f3fc..f4cf7629099 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/FeedUserAuthScanLabelGenerator.java @@ -65,10 +65,10 @@ public class FeedUserAuthScanLabelGenerator implements ScanLabelGenerator { if (authorizations == null || authorizations.getLabels() == null || authorizations.getLabels().isEmpty()) { String userName = user.getShortName(); - Set auths = new HashSet(); + Set auths = new HashSet<>(); auths.addAll(this.labelsCache.getUserAuths(userName)); auths.addAll(this.labelsCache.getGroupAuths(user.getGroupNames())); - return new ArrayList(auths); + return new ArrayList<>(auths); } return authorizations.getLabels(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index fb685bce387..476921b3706 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -145,7 +145,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, boolean authorizationEnabled; // Add to this list if there are any reserved tag types - private static ArrayList RESERVED_VIS_TAG_TYPES = new ArrayList(); + private static ArrayList RESERVED_VIS_TAG_TYPES = new ArrayList<>(); static { RESERVED_VIS_TAG_TYPES.add(TagType.VISIBILITY_TAG_TYPE); RESERVED_VIS_TAG_TYPES.add(TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE); @@ -328,7 +328,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, return; } // TODO this can be made as a global LRU cache at HRS level? 
- Map> labelCache = new HashMap>(); + Map> labelCache = new HashMap<>(); for (int i = 0; i < miniBatchOp.size(); i++) { Mutation m = miniBatchOp.getOperation(i); CellVisibility cellVisibility = null; @@ -341,7 +341,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, } boolean sanityFailure = false; boolean modifiedTagFound = false; - Pair pair = new Pair(false, null); + Pair pair = new Pair<>(false, null); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { pair = checkForReservedVisibilityTagPresence(cellScanner.current(), pair); if (!pair.getFirst()) { @@ -381,7 +381,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, } } if (visibilityTags != null) { - List updatedCells = new ArrayList(); + List updatedCells = new ArrayList<>(); for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); List tags = CellUtil.getTags(cell); @@ -427,7 +427,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, } // The check for checkForReservedVisibilityTagPresence happens in preBatchMutate happens. // It happens for every mutation and that would be enough. - List visibilityTags = new ArrayList(); + List visibilityTags = new ArrayList<>(); if (cellVisibility != null) { String labelsExp = cellVisibility.getExpression(); try { @@ -474,7 +474,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, private Pair checkForReservedVisibilityTagPresence(Cell cell, Pair pair) throws IOException { if (pair == null) { - pair = new Pair(false, null); + pair = new Pair<>(false, null); } else { pair.setFirst(false); pair.setSecond(null); @@ -782,7 +782,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"), response); } else { - List labels = new ArrayList(visLabels.size()); + List labels = new ArrayList<>(visLabels.size()); try { if (authorizationEnabled) { checkCallingUserAuth(); @@ -844,7 +844,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, response); } else { byte[] user = request.getUser().toByteArray(); - List labelAuths = new ArrayList(auths.size()); + List labelAuths = new ArrayList<>(auths.size()); try { if (authorizationEnabled) { checkCallingUserAuth(); @@ -959,7 +959,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, "VisibilityController not yet initialized"), response); } else { byte[] requestUser = request.getUser().toByteArray(); - List labelAuths = new ArrayList(auths.size()); + List labelAuths = new ArrayList<>(auths.size()); try { // When AC is ON, do AC based user auth check if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { @@ -1071,7 +1071,7 @@ public class VisibilityController implements MasterObserver, RegionObserver, @Override public ReturnCode filterKeyValue(Cell cell) throws IOException { - List putVisTags = new ArrayList(); + List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager .getInstance().getVisibilityLabelService() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java index 0948520b0c5..e27a4f826c8 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java @@ -54,10 +54,10 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { private static VisibilityLabelsCache instance; private ZKVisibilityLabelWatcher zkVisibilityWatcher; - private Map labels = new HashMap(); - private Map ordinalVsLabels = new HashMap(); - private Map> userAuths = new HashMap>(); - private Map> groupAuths = new HashMap>(); + private Map labels = new HashMap<>(); + private Map ordinalVsLabels = new HashMap<>(); + private Map> userAuths = new HashMap<>(); + private Map> groupAuths = new HashMap<>(); /** * This covers the members labels, ordinalVsLabels and userAuths @@ -145,10 +145,9 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { for (UserAuthorizations userAuths : multiUserAuths.getUserAuthsList()) { String user = Bytes.toString(userAuths.getUser().toByteArray()); if (AuthUtil.isGroupPrincipal(user)) { - this.groupAuths.put(AuthUtil.getGroupName(user), - new HashSet(userAuths.getAuthList())); + this.groupAuths.put(AuthUtil.getGroupName(user), new HashSet<>(userAuths.getAuthList())); } else { - this.userAuths.put(user, new HashSet(userAuths.getAuthList())); + this.userAuths.put(user, new HashSet<>(userAuths.getAuthList())); } } } finally { @@ -210,7 +209,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { List auths = EMPTY_LIST; Set authOrdinals = getUserAuthsAsOrdinals(user); if (!authOrdinals.equals(EMPTY_SET)) { - auths = new ArrayList(authOrdinals.size()); + auths = new ArrayList<>(authOrdinals.size()); for (Integer authOrdinal : authOrdinals) { auths.add(ordinalVsLabels.get(authOrdinal)); } @@ -227,7 +226,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { List auths = EMPTY_LIST; Set authOrdinals = getGroupAuthsAsOrdinals(groups); if (!authOrdinals.equals(EMPTY_SET)) { - auths = new ArrayList(authOrdinals.size()); + auths = new ArrayList<>(authOrdinals.size()); for (Integer authOrdinal : authOrdinals) { auths.add(ordinalVsLabels.get(authOrdinal)); } @@ -263,7 +262,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { public Set getGroupAuthsAsOrdinals(String[] groups) { this.lock.readLock().lock(); try { - Set authOrdinals = new HashSet(); + Set authOrdinals = new HashSet<>(); if (groups != null && groups.length > 0) { Set groupAuthOrdinals = null; for (String group : groups) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java index c1c3852e499..c77b7761ac6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java @@ -69,9 +69,9 @@ public class VisibilityReplicationEndpoint implements ReplicationEndpoint { // string based tags. But for intra cluster replication like region // replicas it is not needed. 
List entries = replicateContext.getEntries(); - List visTags = new ArrayList(); - List nonVisTags = new ArrayList(); - List newEntries = new ArrayList(entries.size()); + List visTags = new ArrayList<>(); + List nonVisTags = new ArrayList<>(); + List newEntries = new ArrayList<>(entries.size()); for (Entry entry : entries) { WALEdit newEdit = new WALEdit(); ArrayList cells = entry.getEdit().getCells(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java index 2595fe054bf..67181e1299a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityScanDeleteTracker.java @@ -53,17 +53,14 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { // type would solve this problem and also ensure that the combination of different type // of deletes with diff ts would also work fine // Track per TS - private List, Byte, Long>> visibilityTagsDeleteFamily = - new ArrayList, Byte, Long>>(); + private List, Byte, Long>> visibilityTagsDeleteFamily = new ArrayList<>(); // Delete family version with different ts and different visibility expression could come. // Need to track it per ts. - private List, Byte, Long>> visibilityTagsDeleteFamilyVersion = - new ArrayList, Byte, Long>>(); + private List, Byte, Long>> visibilityTagsDeleteFamilyVersion = new ArrayList<>(); private List, Byte>> visibilityTagsDeleteColumns; // Tracking as List is to handle same ts cell but different visibility tag. // TODO : Need to handle puts with same ts but different vis tags. 
- private List, Byte>> visiblityTagsDeleteColumnVersion = - new ArrayList, Byte>>(); + private List, Byte>> visiblityTagsDeleteColumnVersion = new ArrayList<>(); public VisibilityScanDeleteTracker() { super(); @@ -117,50 +114,46 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { Byte deleteCellVisTagsFormat = null; switch (type) { case DeleteFamily: - List delTags = new ArrayList(); + List delTags = new ArrayList<>(); if (visibilityTagsDeleteFamily == null) { - visibilityTagsDeleteFamily = new ArrayList, Byte, Long>>(); + visibilityTagsDeleteFamily = new ArrayList<>(); } deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamily.add(new Triple, Byte, Long>(delTags, - deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamily.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } break; case DeleteFamilyVersion: if(visibilityTagsDeleteFamilyVersion == null) { - visibilityTagsDeleteFamilyVersion = new ArrayList, Byte, Long>>(); + visibilityTagsDeleteFamilyVersion = new ArrayList<>(); } - delTags = new ArrayList(); + delTags = new ArrayList<>(); deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteFamilyVersion.add(new Triple, Byte, Long>(delTags, - deleteCellVisTagsFormat, delCell.getTimestamp())); + visibilityTagsDeleteFamilyVersion.add(new Triple<>(delTags, deleteCellVisTagsFormat, delCell.getTimestamp())); hasVisTag = true; } break; case DeleteColumn: if (visibilityTagsDeleteColumns == null) { - visibilityTagsDeleteColumns = new ArrayList, Byte>>(); + visibilityTagsDeleteColumns = new ArrayList<>(); } - delTags = new ArrayList(); + delTags = new ArrayList<>(); deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visibilityTagsDeleteColumns.add(new Pair, Byte>(delTags, - deleteCellVisTagsFormat)); + visibilityTagsDeleteColumns.add(new Pair<>(delTags, deleteCellVisTagsFormat)); hasVisTag = true; } break; case Delete: if (visiblityTagsDeleteColumnVersion == null) { - visiblityTagsDeleteColumnVersion = new ArrayList, Byte>>(); + visiblityTagsDeleteColumnVersion = new ArrayList<>(); } - delTags = new ArrayList(); + delTags = new ArrayList<>(); deleteCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(delCell, delTags); if (!delTags.isEmpty()) { - visiblityTagsDeleteColumnVersion.add(new Pair, Byte>(delTags, - deleteCellVisTagsFormat)); + visiblityTagsDeleteColumnVersion.add(new Pair<>(delTags, deleteCellVisTagsFormat)); hasVisTag = true; } break; @@ -182,7 +175,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { // visibilityTagsDeleteFamily is ArrayList Triple, Byte, Long> triple = visibilityTagsDeleteFamily.get(i); if (timestamp <= triple.getThird()) { - List putVisTags = new ArrayList(); + List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, @@ -218,7 +211,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { // visibilityTagsDeleteFamilyVersion is ArrayList Triple, Byte, Long> triple = visibilityTagsDeleteFamilyVersion.get(i); if (timestamp == triple.getThird()) { - List putVisTags = new ArrayList(); + List putVisTags = new ArrayList<>(); 
Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() .getVisibilityLabelService().matchVisibility(putVisTags, putCellVisTagsFormat, @@ -248,7 +241,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { if (visibilityTagsDeleteColumns != null) { if (!visibilityTagsDeleteColumns.isEmpty()) { for (Pair, Byte> tags : visibilityTagsDeleteColumns) { - List putVisTags = new ArrayList(); + List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() @@ -277,7 +270,7 @@ public class VisibilityScanDeleteTracker extends ScanDeleteTracker { if (visiblityTagsDeleteColumnVersion != null) { if (!visiblityTagsDeleteColumnVersion.isEmpty()) { for (Pair, Byte> tags : visiblityTagsDeleteColumnVersion) { - List putVisTags = new ArrayList(); + List putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags); boolean matchFound = VisibilityLabelServiceManager.getInstance() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index 1db506d08c1..4441c086e97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -175,7 +175,7 @@ public class VisibilityUtils { String slgClassesCommaSeparated = conf.get(VISIBILITY_LABEL_GENERATOR_CLASS); // We have only System level SLGs now. The order of execution will be same as the order in the // comma separated config value - List slgs = new ArrayList(); + List slgs = new ArrayList<>(); if (StringUtils.isNotEmpty(slgClassesCommaSeparated)) { String[] slgClasses = slgClassesCommaSeparated.split(COMMA); for (String slgClass : slgClasses) { @@ -266,7 +266,7 @@ public class VisibilityUtils { public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) throws IOException { - Map cfVsMaxVersions = new HashMap(); + Map cfVsMaxVersions = new HashMap<>(); for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) { cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions()); } @@ -302,10 +302,10 @@ public class VisibilityUtils { throw new IOException(e); } node = EXP_EXPANDER.expand(node); - List tags = new ArrayList(); + List tags = new ArrayList<>(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); - List labelOrdinals = new ArrayList(); + List labelOrdinals = new ArrayList<>(); // We will be adding this tag before the visibility tags and the presence of this // tag indicates we are supporting deletes with cell visibility if (withSerializationFormat) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java index 4399ecc55ff..9903b9b8fff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java @@ -26,7 +26,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public class NonLeafExpressionNode implements ExpressionNode { private Operator op; - private List childExps = new ArrayList(2); + private List childExps = new ArrayList<>(2); public NonLeafExpressionNode() { @@ -46,7 +46,7 @@ public class NonLeafExpressionNode implements ExpressionNode { public NonLeafExpressionNode(Operator op, ExpressionNode... exps) { this.op = op; - List expLst = new ArrayList(); + List expLst = new ArrayList<>(); Collections.addAll(expLst, exps); this.childExps = expLst; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index a5507fc0fe2..efae7e4f282 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -564,7 +564,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { final FileSystem fs, final Path snapshotDir) throws IOException { SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - final List> files = new ArrayList>(); + final List> files = new ArrayList<>(); final TableName table = TableName.valueOf(snapshotDesc.getTable()); // Get snapshot files @@ -591,7 +591,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { } else { size = HFileLink.buildFromHFileLinkPattern(conf, path).getFileStatus(fs).getLen(); } - files.add(new Pair(fileInfo, size)); + files.add(new Pair<>(fileInfo, size)); } } }); @@ -618,8 +618,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { }); // create balanced groups - List>> fileGroups = - new LinkedList>>(); + List>> fileGroups = new LinkedList<>(); long[] sizeGroups = new long[ngroups]; int hi = files.size() - 1; int lo = 0; @@ -630,7 +629,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { while (hi >= lo) { if (g == fileGroups.size()) { - group = new LinkedList>(); + group = new LinkedList<>(); fileGroups.add(group); } else { group = fileGroups.get(g); @@ -703,7 +702,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { public ExportSnapshotInputSplit(final List> snapshotFiles) { this.files = new ArrayList(snapshotFiles.size()); for (Pair fileInfo: snapshotFiles) { - this.files.add(new Pair( + this.files.add(new Pair<>( new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); this.length += fileInfo.getSecond(); } @@ -726,13 +725,13 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { @Override public void readFields(DataInput in) throws IOException { int count = in.readInt(); - files = new ArrayList>(count); + files = new ArrayList<>(count); length = 0; for (int i = 0; i < count; ++i) { BytesWritable fileInfo = new BytesWritable(); fileInfo.readFields(in); long size = in.readLong(); - files.add(new Pair(fileInfo, size)); + files.add(new Pair<>(fileInfo, size)); length += size; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 98afe8bc132..63839c415a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -108,11 
+108,9 @@ import org.apache.hadoop.io.IOUtils; public class RestoreSnapshotHelper { private static final Log LOG = LogFactory.getLog(RestoreSnapshotHelper.class); - private final Map regionsMap = - new TreeMap(Bytes.BYTES_COMPARATOR); + private final Map regionsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); - private final Map > parentsMap = - new HashMap >(); + private final Map > parentsMap = new HashMap<>(); private final ForeignExceptionDispatcher monitor; private final MonitoredTask status; @@ -187,7 +185,7 @@ public class RestoreSnapshotHelper { // Take a copy of the manifest.keySet() since we are going to modify // this instance, by removing the regions already present in the restore dir. - Set regionNames = new HashSet(regionManifests.keySet()); + Set regionNames = new HashSet<>(regionManifests.keySet()); HRegionInfo mobRegion = MobUtils.getMobRegionInfo(snapshotManifest.getTableDescriptor() .getTableName()); @@ -213,7 +211,7 @@ public class RestoreSnapshotHelper { status.setStatus("Restoring table regions..."); if (regionNames.contains(mobRegion.getEncodedName())) { // restore the mob region in case - List mobRegions = new ArrayList(1); + List mobRegions = new ArrayList<>(1); mobRegions.add(mobRegion); restoreHdfsMobRegions(exec, regionManifests, mobRegions); regionNames.remove(mobRegion.getEncodedName()); @@ -230,7 +228,7 @@ public class RestoreSnapshotHelper { // Regions to Add: present in the snapshot but not in the current table if (regionNames.size() > 0) { - List regionsToAdd = new ArrayList(regionNames.size()); + List regionsToAdd = new ArrayList<>(regionNames.size()); monitor.rethrowException(); // add the mob region @@ -344,14 +342,14 @@ public class RestoreSnapshotHelper { void addRegionToRemove(final HRegionInfo hri) { if (regionsToRemove == null) { - regionsToRemove = new LinkedList(); + regionsToRemove = new LinkedList<>(); } regionsToRemove.add(hri); } void addRegionToRestore(final HRegionInfo hri) { if (regionsToRestore == null) { - regionsToRestore = new LinkedList(); + regionsToRestore = new LinkedList<>(); } regionsToRestore.add(hri); } @@ -361,7 +359,7 @@ public class RestoreSnapshotHelper { if (regionInfos == null || parentsMap.isEmpty()) return; // Extract region names and offlined regions - Map regionsByName = new HashMap(regionInfos.size()); + Map regionsByName = new HashMap<>(regionInfos.size()); List parentRegions = new LinkedList<>(); for (HRegionInfo regionInfo: regionInfos) { if (regionInfo.isSplitParent()) { @@ -441,10 +439,10 @@ public class RestoreSnapshotHelper { private Map> getRegionHFileReferences( final SnapshotRegionManifest manifest) { Map> familyMap = - new HashMap>(manifest.getFamilyFilesCount()); + new HashMap<>(manifest.getFamilyFilesCount()); for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { familyMap.put(familyFiles.getFamilyName().toStringUtf8(), - new ArrayList(familyFiles.getStoreFilesList())); + new ArrayList<>(familyFiles.getStoreFilesList())); } return familyMap; } @@ -489,8 +487,7 @@ public class RestoreSnapshotHelper { List snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName()); if (snapshotFamilyFiles != null) { - List hfilesToAdd = - new ArrayList(); + List hfilesToAdd = new ArrayList<>(); for (SnapshotRegionManifest.StoreFile storeFile: snapshotFamilyFiles) { if (familyFiles.contains(storeFile.getName())) { // HFile already present @@ -546,7 +543,7 @@ public class RestoreSnapshotHelper { FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir); if (hfiles == null) return 
Collections.emptySet(); - Set familyFiles = new HashSet(hfiles.length); + Set familyFiles = new HashSet<>(hfiles.length); for (int i = 0; i < hfiles.length; ++i) { String hfileName = hfiles[i].getPath().getName(); familyFiles.add(hfileName); @@ -564,8 +561,7 @@ public class RestoreSnapshotHelper { final List regions) throws IOException { if (regions == null || regions.isEmpty()) return null; - final Map snapshotRegions = - new HashMap(regions.size()); + final Map snapshotRegions = new HashMap<>(regions.size()); // clone region info (change embedded tableName with the new one) HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()]; @@ -742,7 +738,7 @@ public class RestoreSnapshotHelper { synchronized (parentsMap) { Pair daughters = parentsMap.get(clonedRegionName); if (daughters == null) { - daughters = new Pair(regionName, null); + daughters = new Pair<>(regionName, null); parentsMap.put(clonedRegionName, daughters); } else if (!regionName.equals(daughters.getFirst())) { daughters.setSecond(regionName); @@ -778,7 +774,7 @@ public class RestoreSnapshotHelper { FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regionDirs == null) return null; - List regions = new ArrayList(regionDirs.length); + List regions = new ArrayList<>(regionDirs.length); for (int i = 0; i < regionDirs.length; ++i) { HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath()); regions.add(hri); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 85d3af38d8f..6dbd3f09e30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -598,8 +598,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir); FileStatus[] snapshots = fs.listStatus(snapshotDir, new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); - List snapshotLists = - new ArrayList(snapshots.length); + List snapshotLists = new ArrayList<>(snapshots.length); for (FileStatus snapshotDirStat: snapshots) { HBaseProtos.SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 47e3073fc0d..4e838ad4b19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -220,7 +220,7 @@ public final class SnapshotManifest { Object familyData = visitor.familyOpen(regionData, store.getFamily().getName()); monitor.rethrowException(); - List storeFiles = new ArrayList(store.getStorefiles()); + List storeFiles = new ArrayList<>(store.getStorefiles()); if (LOG.isDebugEnabled()) { LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); } @@ -305,7 +305,7 @@ public final class SnapshotManifest { FileStatus[] stats = FSUtils.listStatus(fs, storeDir); if (stats == null) return null; - ArrayList storeFiles = new ArrayList(stats.length); + ArrayList storeFiles = new ArrayList<>(stats.length); for (int i = 0; i < stats.length; ++i) { storeFiles.add(new StoreFileInfo(conf, fs, stats[i])); } @@ -374,8 
+374,7 @@ public final class SnapshotManifest { tpool.shutdown(); } if (v1Regions != null && v2Regions != null) { - regionManifests = - new ArrayList(v1Regions.size() + v2Regions.size()); + regionManifests = new ArrayList<>(v1Regions.size() + v2Regions.size()); regionManifests.addAll(v1Regions); regionManifests.addAll(v2Regions); } else if (v1Regions != null) { @@ -427,8 +426,7 @@ public final class SnapshotManifest { public Map getRegionManifestsMap() { if (regionManifests == null || regionManifests.isEmpty()) return null; - HashMap regionsMap = - new HashMap(regionManifests.size()); + HashMap regionsMap = new HashMap<>(regionManifests.size()); for (SnapshotRegionManifest manifest: regionManifests) { String regionName = getRegionNameFromManifest(manifest); regionsMap.put(regionName, manifest); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index cceeebc1325..46893f90002 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -121,7 +121,7 @@ public final class SnapshotManifestV1 { } final ExecutorCompletionService completionService = - new ExecutorCompletionService(executor); + new ExecutorCompletionService<>(executor); for (final FileStatus region: regions) { completionService.submit(new Callable() { @Override @@ -132,8 +132,7 @@ public final class SnapshotManifestV1 { }); } - ArrayList regionsManifest = - new ArrayList(regions.length); + ArrayList regionsManifest = new ArrayList<>(regions.length); try { for (int i = 0; i < regions.length; ++i) { regionsManifest.add(completionService.take().get()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index a1341fb81f6..567f42dd1ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -139,7 +139,7 @@ public final class SnapshotManifestV2 { if (manifestFiles == null || manifestFiles.length == 0) return null; final ExecutorCompletionService completionService = - new ExecutorCompletionService(executor); + new ExecutorCompletionService<>(executor); for (final FileStatus st: manifestFiles) { completionService.submit(new Callable() { @Override @@ -157,8 +157,7 @@ public final class SnapshotManifestV2 { }); } - ArrayList regionsManifest = - new ArrayList(manifestFiles.length); + ArrayList regionsManifest = new ArrayList<>(manifestFiles.length); try { for (int i = 0; i < manifestFiles.length; ++i) { regionsManifest.add(completionService.take().get()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java index 8cd438e791c..7a2bfe64fbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java @@ -210,8 +210,7 @@ public final class SnapshotReferenceUtil { return; } - final ExecutorCompletionService completionService = - new ExecutorCompletionService(exec); + final ExecutorCompletionService completionService = new ExecutorCompletionService<>(exec); for 
(final SnapshotRegionManifest regionManifest : regionManifests) { completionService.submit(new Callable() { @@ -345,7 +344,7 @@ public final class SnapshotReferenceUtil { private static Set getHFileNames(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { - final Set names = new HashSet(); + final Set names = new HashSet<>(); visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { @Override public void storeFile(final HRegionInfo regionInfo, final String family, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 73160bc3bd9..ee93cdb1f05 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -140,8 +140,8 @@ public final class Canary implements Tool { private AtomicLong readFailureCount = new AtomicLong(0), writeFailureCount = new AtomicLong(0); - private Map readFailures = new ConcurrentHashMap(); - private Map writeFailures = new ConcurrentHashMap(); + private Map readFailures = new ConcurrentHashMap<>(); + private Map writeFailures = new ConcurrentHashMap<>(); @Override public long getReadFailureCount() { @@ -949,7 +949,7 @@ public final class Canary implements Tool { public void run() { if (this.initAdmin()) { try { - List> taskFutures = new LinkedList>(); + List> taskFutures = new LinkedList<>(); if (this.targets != null && this.targets.length > 0) { String[] tables = generateMonitorTables(this.targets); this.initialized = true; @@ -996,7 +996,7 @@ public final class Canary implements Tool { if (this.useRegExp) { Pattern pattern = null; HTableDescriptor[] tds = null; - Set tmpTables = new TreeSet(); + Set tmpTables = new TreeSet<>(); try { if (LOG.isDebugEnabled()) { LOG.debug(String.format("reading list of tables")); @@ -1040,7 +1040,7 @@ public final class Canary implements Tool { if (LOG.isDebugEnabled()) { LOG.debug(String.format("reading list of tables")); } - List> taskFutures = new LinkedList>(); + List> taskFutures = new LinkedList<>(); for (HTableDescriptor table : admin.listTables()) { if (admin.isTableEnabled(table.getTableName()) && (!table.getTableName().equals(writeTableName))) { @@ -1078,7 +1078,7 @@ public final class Canary implements Tool { admin.deleteTable(writeTableName); createWriteTable(numberOfServers); } - HashSet serverSet = new HashSet(); + HashSet serverSet = new HashSet<>(); for (Pair pair : pairs) { serverSet.add(pair.getSecond()); } @@ -1165,7 +1165,7 @@ public final class Canary implements Tool { } else { LOG.warn(String.format("Table %s is not enabled", tableName)); } - return new LinkedList>(); + return new LinkedList<>(); } /* @@ -1183,7 +1183,7 @@ public final class Canary implements Tool { try { table = admin.getConnection().getTable(tableDesc.getTableName()); } catch (TableNotFoundException e) { - return new ArrayList>(); + return new ArrayList<>(); } finally { if (table !=null) { @@ -1191,7 +1191,7 @@ public final class Canary implements Tool { } } - List tasks = new ArrayList(); + List tasks = new ArrayList<>(); RegionLocator regionLocator = null; try { regionLocator = admin.getConnection().getRegionLocator(tableDesc.getTableName()); @@ -1290,7 +1290,7 @@ public final class Canary implements Tool { } private boolean checkNoTableNames() { - List foundTableNames = new ArrayList(); + List foundTableNames = new ArrayList<>(); TableName[] 
tableNames = null; if (LOG.isDebugEnabled()) { @@ -1323,8 +1323,8 @@ public final class Canary implements Tool { } private void monitorRegionServers(Map> rsAndRMap) { - List tasks = new ArrayList(); - Map successMap = new HashMap(); + List tasks = new ArrayList<>(); + Map successMap = new HashMap<>(); Random rand = new Random(); for (Map.Entry> entry : rsAndRMap.entrySet()) { String serverName = entry.getKey(); @@ -1379,7 +1379,7 @@ public final class Canary implements Tool { } private Map> getAllRegionServerByName() { - Map> rsAndRMap = new HashMap>(); + Map> rsAndRMap = new HashMap<>(); Table table = null; RegionLocator regionLocator = null; try { @@ -1400,7 +1400,7 @@ public final class Canary implements Tool { if (rsAndRMap.containsKey(rsName)) { regions = rsAndRMap.get(rsName); } else { - regions = new ArrayList(); + regions = new ArrayList<>(); rsAndRMap.put(rsName, regions); } regions.add(r); @@ -1438,7 +1438,7 @@ public final class Canary implements Tool { Map> filteredRsAndRMap = null; if (this.targets != null && this.targets.length > 0) { - filteredRsAndRMap = new HashMap>(); + filteredRsAndRMap = new HashMap<>(); Pattern pattern = null; Matcher matcher = null; boolean regExpFound = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java index 4a9315178aa..354382c2789 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java @@ -152,7 +152,7 @@ public class BoundedPriorityBlockingQueue extends AbstractQueue implements */ public BoundedPriorityBlockingQueue(int capacity, Comparator comparator) { - this.queue = new PriorityQueue(capacity, comparator); + this.queue = new PriorityQueue<>(capacity, comparator); } public boolean offer(E e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java index 3f05969cd86..9e36290733a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CollectionBackedScanner.java @@ -66,7 +66,7 @@ public class CollectionBackedScanner extends NonReversedNonLazyKeyValueScanner { Cell... 
array) { this.comparator = comparator; - List tmp = new ArrayList(array.length); + List tmp = new ArrayList<>(array.length); Collections.addAll(tmp, array); Collections.sort(tmp, comparator); data = tmp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 0659a0d37aa..87e867fce19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -50,9 +50,8 @@ import org.apache.commons.logging.LogFactory; public class ConnectionCache { private static final Log LOG = LogFactory.getLog(ConnectionCache.class); - private final Map - connections = new ConcurrentHashMap(); - private final KeyLocker locker = new KeyLocker(); + private final Map connections = new ConcurrentHashMap<>(); + private final KeyLocker locker = new KeyLocker<>(); private final String realUserName; private final UserGroupInformation realUser; private final UserProvider userProvider; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java index f45ecffa80d..6692ee83a45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java @@ -40,10 +40,9 @@ import org.apache.hadoop.hbase.security.EncryptionUtil; public class EncryptionTest { private static final Log LOG = LogFactory.getLog(EncryptionTest.class); - static final Map keyProviderResults = new ConcurrentHashMap(); - static final Map cipherProviderResults = - new ConcurrentHashMap(); - static final Map cipherResults = new ConcurrentHashMap(); + static final Map keyProviderResults = new ConcurrentHashMap<>(); + static final Map cipherProviderResults = new ConcurrentHashMap<>(); + static final Map cipherResults = new ConcurrentHashMap<>(); private EncryptionTest() { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java index 0d880d09631..de49d389ba5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java @@ -58,7 +58,7 @@ public class FSHDFSUtils extends FSUtils { */ private static Set getNNAddresses(DistributedFileSystem fs, Configuration conf) { - Set addresses = new HashSet(); + Set addresses = new HashSet<>(); String serviceName = fs.getCanonicalServiceName(); if (serviceName.startsWith("ha-hdfs")) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java index b0af52b6ca8..0bc87830d11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -72,7 +72,7 @@ class FSRegionScanner implements Runnable { public void run() { try { // empty the map for each region - Map blockCountMap = new HashMap(); + Map blockCountMap = new HashMap<>(); //get table name String tableName = regionPath.getParent().getName(); @@ -145,7 +145,7 @@ class FSRegionScanner implements Runnable { } if (regionDegreeLocalityMapping != null && totalBlkCount > 0) { - Map hostLocalityMap = new HashMap(); + Map 
hostLocalityMap = new HashMap<>(); for (Map.Entry entry : blockCountMap.entrySet()) { String host = entry.getKey(); if (host.endsWith(".")) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index a100a15f7b7..c2ca3eb793e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -89,8 +89,7 @@ public class FSTableDescriptors implements TableDescriptors { // This cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. - private final Map cache = - new ConcurrentHashMap(); + private final Map cache = new ConcurrentHashMap<>(); /** * Table descriptor for hbase:meta catalog table @@ -271,7 +270,7 @@ public class FSTableDescriptors implements TableDescriptors { @Override public Map getAllDescriptors() throws IOException { - Map tds = new TreeMap(); + Map tds = new TreeMap<>(); if (fsvisited && usecache) { for (Map.Entry entry: this.cache.entrySet()) { @@ -307,7 +306,7 @@ public class FSTableDescriptors implements TableDescriptors { */ @Override public Map getAll() throws IOException { - Map htds = new TreeMap(); + Map htds = new TreeMap<>(); Map allDescriptors = getAllDescriptors(); for (Map.Entry entry : allDescriptors .entrySet()) { @@ -323,7 +322,7 @@ public class FSTableDescriptors implements TableDescriptors { @Override public Map getByNamespace(String name) throws IOException { - Map htds = new TreeMap(); + Map htds = new TreeMap<>(); List tableDirs = FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name)); for (Path d: tableDirs) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 84b34367544..c78ba0634e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1240,7 +1240,7 @@ public abstract class FSUtils { public static Map getTableFragmentation( final FileSystem fs, final Path hbaseRootDir) throws IOException { - Map frags = new HashMap(); + Map frags = new HashMap<>(); int cfCountTotal = 0; int cfFragTotal = 0; PathFilter regionFilter = new RegionDirFilter(fs); @@ -1434,7 +1434,7 @@ public abstract class FSUtils { public static List getTableDirs(final FileSystem fs, final Path rootdir) throws IOException { - List tableDirs = new LinkedList(); + List tableDirs = new LinkedList<>(); for(FileStatus status : fs.globStatus(new Path(rootdir, @@ -1455,7 +1455,7 @@ public abstract class FSUtils { throws IOException { // presumes any directory under hbase.rootdir is a table FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); - List tabledirs = new ArrayList(dirs.length); + List tabledirs = new ArrayList<>(dirs.length); for (FileStatus dir: dirs) { tabledirs.add(dir.getPath()); } @@ -1511,9 +1511,9 @@ public abstract class FSUtils { // assumes we are in a table dir. 
List rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); if (rds == null) { - return new ArrayList(); + return new ArrayList<>(); } - List regionDirs = new ArrayList(rds.size()); + List regionDirs = new ArrayList<>(rds.size()); for (FileStatus rdfs: rds) { Path rdPath = rdfs.getPath(); regionDirs.add(rdPath); @@ -1563,7 +1563,7 @@ public abstract class FSUtils { public static List getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException { // assumes we are in a region dir. FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs)); - List familyDirs = new ArrayList(fds.length); + List familyDirs = new ArrayList<>(fds.length); for (FileStatus fdfs: fds) { Path fdPath = fdfs.getPath(); familyDirs.add(fdPath); @@ -1574,9 +1574,9 @@ public abstract class FSUtils { public static List getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException { List fds = listStatusWithStatusFilter(fs, familyDir, new ReferenceFileFilter(fs)); if (fds == null) { - return new ArrayList(); + return new ArrayList<>(); } - List referenceFiles = new ArrayList(fds.size()); + List referenceFiles = new ArrayList<>(fds.size()); for (FileStatus fdfs: fds) { Path fdPath = fdfs.getPath(); referenceFiles.add(fdPath); @@ -1709,14 +1709,14 @@ public abstract class FSUtils { ExecutorService executor, final ErrorReporter errors) throws IOException, InterruptedException { final Map finalResultMap = - resultMap == null ? new ConcurrentHashMap(128, 0.75f, 32) : resultMap; + resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap; // only include the directory paths to tables Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. final FamilyDirFilter familyFilter = new FamilyDirFilter(fs); - final Vector exceptions = new Vector(); + final Vector exceptions = new Vector<>(); try { List regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs)); @@ -1724,7 +1724,7 @@ public abstract class FSUtils { return finalResultMap; } - final List> futures = new ArrayList>(regionDirs.size()); + final List> futures = new ArrayList<>(regionDirs.size()); for (FileStatus regionDir : regionDirs) { if (null != errors) { @@ -1740,7 +1740,7 @@ public abstract class FSUtils { @Override public void run() { try { - HashMap regionStoreFileMap = new HashMap(); + HashMap regionStoreFileMap = new HashMap<>(); List familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter); if (familyDirs == null) { if (!fs.exists(dd)) { @@ -1785,7 +1785,7 @@ public abstract class FSUtils { Future future = executor.submit(getRegionStoreFileMapCall); futures.add(future); } else { - FutureTask future = new FutureTask(getRegionStoreFileMapCall, null); + FutureTask future = new FutureTask<>(getRegionStoreFileMapCall, null); future.run(); futures.add(future); } @@ -1871,7 +1871,7 @@ public abstract class FSUtils { final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter, ExecutorService executor, ErrorReporter errors) throws IOException, InterruptedException { - ConcurrentHashMap map = new ConcurrentHashMap(1024, 0.75f, 32); + ConcurrentHashMap map = new ConcurrentHashMap<>(1024, 0.75f, 32); // if this method looks similar to 'getTableFragmentation' that is because // it was borrowed from it. 
@@ -1907,7 +1907,7 @@ public abstract class FSUtils { public static List filterFileStatuses(Iterator input, FileStatusFilter filter) { if (input == null) return null; - ArrayList results = new ArrayList(); + ArrayList results = new ArrayList<>(); while (input.hasNext()) { FileStatus f = input.next(); if (filter.accept(f)) { @@ -2167,8 +2167,7 @@ public abstract class FSUtils { public static Map> getRegionDegreeLocalityMappingFromFS( final Configuration conf, final String desiredTable, int threadPoolSize) throws IOException { - Map> regionDegreeLocalityMapping = - new ConcurrentHashMap>(); + Map> regionDegreeLocalityMapping = new ConcurrentHashMap<>(); getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null, regionDegreeLocalityMapping); return regionDegreeLocalityMapping; @@ -2253,7 +2252,7 @@ public abstract class FSUtils { // run in multiple threads ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 60, TimeUnit.SECONDS, - new ArrayBlockingQueue(statusList.length)); + new ArrayBlockingQueue<>(statusList.length)); try { // ignore all file status items that are not of interest for (FileStatus regionStatus : statusList) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 4d44187125f..7b3b25b6a21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -256,7 +256,7 @@ public class HBaseFsck extends Configured implements Closeable { // limit checking/fixes to listed tables, if empty attempt to check/fix all // hbase:meta are always checked - private Set tablesIncluded = new HashSet(); + private Set tablesIncluded = new HashSet<>(); private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge // maximum number of overlapping regions to sideline private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE; @@ -280,9 +280,9 @@ public class HBaseFsck extends Configured implements Closeable { * name to HbckInfo structure. The information contained in HbckInfo is used * to detect and correct consistency (hdfs/meta/deployment) problems. */ - private TreeMap regionInfoMap = new TreeMap(); + private TreeMap regionInfoMap = new TreeMap<>(); // Empty regioninfo qualifiers in hbase:meta - private Set emptyRegionInfoQualifiers = new HashSet(); + private Set emptyRegionInfoQualifiers = new HashSet<>(); /** * This map from Tablename -> TableInfo contains the structures necessary to @@ -294,22 +294,19 @@ public class HBaseFsck extends Configured implements Closeable { * unless checkMetaOnly is specified, in which case, it contains only * the meta table */ - private SortedMap tablesInfo = - new ConcurrentSkipListMap(); + private SortedMap tablesInfo = new ConcurrentSkipListMap<>(); /** * When initially looking at HDFS, we attempt to find any orphaned data. 
*/ private List orphanHdfsDirs = Collections.synchronizedList(new ArrayList()); - private Map> orphanTableDirs = - new HashMap>(); - private Map tableStates = - new HashMap(); + private Map> orphanTableDirs = new HashMap<>(); + private Map tableStates = new HashMap<>(); private final RetryCounterFactory lockFileRetryCounterFactory; private final RetryCounterFactory createZNodeRetryCounterFactory; - private Map> skippedRegions = new HashMap>(); + private Map> skippedRegions = new HashMap<>(); private ZooKeeperWatcher zkw = null; private String hbckEphemeralNodePath = null; @@ -431,7 +428,7 @@ public class HBaseFsck extends Configured implements Closeable { RetryCounter retryCounter = lockFileRetryCounterFactory.create(); FileLockCallable callable = new FileLockCallable(retryCounter); ExecutorService executor = Executors.newFixedThreadPool(1); - FutureTask futureTask = new FutureTask(callable); + FutureTask futureTask = new FutureTask<>(callable); executor.execute(futureTask); final int timeoutInSeconds = getConf().getInt( "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT); @@ -977,7 +974,7 @@ public class HBaseFsck extends Configured implements Closeable { // expand the range to include the range of all hfiles if (orphanRegionRange == null) { // first range - orphanRegionRange = new Pair(start, end); + orphanRegionRange = new Pair<>(start, end); } else { // TODO add test @@ -1267,7 +1264,7 @@ public class HBaseFsck extends Configured implements Closeable { Collection hbckInfos = regionInfoMap.values(); // Parallelized read of .regioninfo files. - List hbis = new ArrayList(hbckInfos.size()); + List hbis = new ArrayList<>(hbckInfos.size()); List> hbiFutures; for (HbckInfo hbi : hbckInfos) { @@ -1323,7 +1320,7 @@ public class HBaseFsck extends Configured implements Closeable { //should only report once for each table errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE, "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName); - Set columns = new HashSet(); + Set columns = new HashSet<>(); orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi)); } } @@ -1402,7 +1399,7 @@ public class HBaseFsck extends Configured implements Closeable { public void fixOrphanTables() throws IOException { if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) { - List tmpList = new ArrayList(orphanTableDirs.keySet().size()); + List tmpList = new ArrayList<>(orphanTableDirs.keySet().size()); tmpList.addAll(orphanTableDirs.keySet()); HTableDescriptor[] htds = getHTableDescriptors(tmpList); Iterator>> iter = @@ -1485,7 +1482,7 @@ public class HBaseFsck extends Configured implements Closeable { */ private ArrayList generatePuts( SortedMap tablesInfo) throws IOException { - ArrayList puts = new ArrayList(); + ArrayList puts = new ArrayList<>(); boolean hasProblems = false; for (Entry e : tablesInfo.entrySet()) { TableName name = e.getKey(); @@ -1936,7 +1933,7 @@ public class HBaseFsck extends Configured implements Closeable { void processRegionServers(Collection regionServerList) throws IOException, InterruptedException { - List workItems = new ArrayList(regionServerList.size()); + List workItems = new ArrayList<>(regionServerList.size()); List> workFutures; // loop to contact each region server in parallel @@ -1966,8 +1963,7 @@ public class HBaseFsck extends Configured implements Closeable { // Divide the checks in two phases. One for default/primary replicas and another // for the non-primary ones. Keeps code cleaner this way. 
- List workItems = - new ArrayList(regionInfoMap.size()); + List workItems = new ArrayList<>(regionInfoMap.size()); for (java.util.Map.Entry e: regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); @@ -1979,8 +1975,7 @@ public class HBaseFsck extends Configured implements Closeable { setCheckHdfs(false); //replicas don't have any hdfs data // Run a pass over the replicas and fix any assignment issues that exist on the currently // deployed/undeployed replicas. - List replicaWorkItems = - new ArrayList(regionInfoMap.size()); + List replicaWorkItems = new ArrayList<>(regionInfoMap.size()); for (java.util.Map.Entry e: regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); @@ -2065,7 +2060,7 @@ public class HBaseFsck extends Configured implements Closeable { private void addSkippedRegion(final HbckInfo hbi) { Set skippedRegionNames = skippedRegions.get(hbi.getTableName()); if (skippedRegionNames == null) { - skippedRegionNames = new HashSet(); + skippedRegionNames = new HashSet<>(); } skippedRegionNames.add(hbi.getRegionNameAsString()); skippedRegions.put(hbi.getTableName(), skippedRegionNames); @@ -2570,7 +2565,7 @@ public class HBaseFsck extends Configured implements Closeable { * @throws IOException */ SortedMap checkIntegrity() throws IOException { - tablesInfo = new TreeMap (); + tablesInfo = new TreeMap<>(); LOG.debug("There are " + regionInfoMap.size() + " region info entries"); for (HbckInfo hbi : regionInfoMap.values()) { // Check only valid, working regions @@ -2753,16 +2748,16 @@ public class HBaseFsck extends Configured implements Closeable { TreeSet deployedOn; // backwards regions - final List backwards = new ArrayList(); + final List backwards = new ArrayList<>(); // sidelined big overlapped regions - final Map sidelinedRegions = new HashMap(); + final Map sidelinedRegions = new HashMap<>(); // region split calculator - final RegionSplitCalculator sc = new RegionSplitCalculator(cmp); + final RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); // Histogram of different HTableDescriptors found. Ideally there is only one! 
- final Set htds = new HashSet(); + final Set htds = new HashSet<>(); // key = start split, values = set of splits in problem group final Multimap overlapGroups = @@ -2773,7 +2768,7 @@ public class HBaseFsck extends Configured implements Closeable { TableInfo(TableName name) { this.tableName = name; - deployedOn = new TreeSet (); + deployedOn = new TreeSet <>(); } /** @@ -2829,7 +2824,7 @@ public class HBaseFsck extends Configured implements Closeable { public synchronized ImmutableList getRegionsFromMeta() { // lazy loaded, synchronized to ensure a single load if (regionsFromMeta == null) { - List regions = new ArrayList(); + List regions = new ArrayList<>(); for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) { if (tableName.equals(h.getTableName())) { if (h.metaEntry != null) { @@ -3031,7 +3026,7 @@ public class HBaseFsck extends Configured implements Closeable { Pair range = null; for (HbckInfo hi : overlap) { if (range == null) { - range = new Pair(hi.getStartKey(), hi.getEndKey()); + range = new Pair<>(hi.getStartKey(), hi.getEndKey()); } else { if (RegionSplitCalculator.BYTES_COMPARATOR .compare(hi.getStartKey(), range.getFirst()) < 0) { @@ -3200,7 +3195,7 @@ public class HBaseFsck extends Configured implements Closeable { overlapGroups.putAll(problemKey, ranges); // record errors - ArrayList subRange = new ArrayList(ranges); + ArrayList subRange = new ArrayList<>(ranges); // this dumb and n^2 but this shouldn't happen often for (HbckInfo r1 : ranges) { if (r1.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue; @@ -3275,7 +3270,7 @@ public class HBaseFsck extends Configured implements Closeable { throws IOException { // we parallelize overlap handler for the case we have lots of groups to fix. We can // safely assume each group is independent. - List merges = new ArrayList(overlapGroups.size()); + List merges = new ArrayList<>(overlapGroups.size()); List> rets; for (Collection overlap : overlapGroups.asMap().values()) { // @@ -3364,7 +3359,7 @@ public class HBaseFsck extends Configured implements Closeable { * @throws IOException if an error is encountered */ HTableDescriptor[] getTables(AtomicInteger numSkipped) { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); long now = EnvironmentEdgeManager.currentTime(); for (HbckInfo hbi : regionInfoMap.values()) { @@ -3429,7 +3424,7 @@ public class HBaseFsck extends Configured implements Closeable { * @throws InterruptedException */ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException { - Map metaRegions = new HashMap(); + Map metaRegions = new HashMap<>(); for (HbckInfo value : regionInfoMap.values()) { if (value.metaEntry != null && value.metaEntry.isMetaRegion()) { metaRegions.put(value.getReplicaId(), value); @@ -3442,7 +3437,7 @@ public class HBaseFsck extends Configured implements Closeable { // Check the deployed servers. It should be exactly one server for each replica. 
for (int i = 0; i < metaReplication; i++) { HbckInfo metaHbckInfo = metaRegions.remove(i); - List servers = new ArrayList(); + List servers = new ArrayList<>(); if (metaHbckInfo != null) { servers = metaHbckInfo.deployedOn; } @@ -3979,10 +3974,10 @@ public class HBaseFsck extends Configured implements Closeable { // How frequently calls to progress() will create output private static final int progressThreshold = 100; - Set errorTables = new HashSet(); + Set errorTables = new HashSet<>(); // for use by unit tests to verify which errors were discovered - private ArrayList errorList = new ArrayList(); + private ArrayList errorList = new ArrayList<>(); @Override public void clear() { @@ -4183,11 +4178,11 @@ public class HBaseFsck extends Configured implements Closeable { @Override public synchronized Void call() throws InterruptedException, ExecutionException { - final Vector exceptions = new Vector(); + final Vector exceptions = new Vector<>(); try { final FileStatus[] regionDirs = fs.listStatus(tableDir.getPath()); - final List> futures = new ArrayList>(regionDirs.length); + final List> futures = new ArrayList<>(regionDirs.length); for (final FileStatus regionDir : regionDirs) { errors.progress(); @@ -4554,7 +4549,7 @@ public class HBaseFsck extends Configured implements Closeable { } Set getIncludedTables() { - return new HashSet(tablesIncluded); + return new HashSet<>(tablesIncluded); } /** @@ -4865,7 +4860,7 @@ public class HBaseFsck extends Configured implements Closeable { HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles); setHFileCorruptionChecker(hfcc); // so we can get result Collection tables = getIncludedTables(); - Collection tableDirs = new ArrayList(); + Collection tableDirs = new ArrayList<>(); Path rootdir = FSUtils.getRootDir(getConf()); if (tables.size() > 0) { for (TableName t : tables) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java index 7f283e676a1..e5dbae20849 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java @@ -58,8 +58,7 @@ public class IdLock { } } - private ConcurrentMap map = - new ConcurrentHashMap(); + private ConcurrentMap map = new ConcurrentHashMap<>(); /** * Blocks until the lock corresponding to the given id is acquired. 
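The hunks above and below all apply the same mechanical rewrite: wherever a constructor call repeats the type arguments already present on the declaration's left-hand side, the explicit arguments are replaced with the diamond operator (<>) and javac infers them. The sketch below is illustrative only; the class and field names in it are invented for this note and do not appear anywhere in the patch.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class DiamondOperatorExample {
      // Before the rewrite: type arguments spelled out on both sides of the assignment.
      private final Map<String, List<Long>> explicitForm = new HashMap<String, List<Long>>();

      // After the rewrite: the diamond operator lets the compiler infer the same arguments.
      private final Map<String, List<Long>> diamondForm = new HashMap<>();

      // The pattern applies equally to concurrent collections and nested generics.
      private final ConcurrentMap<Long, List<String>> cache = new ConcurrentHashMap<>();

      public static void main(String[] args) {
        List<String> names = new ArrayList<>(16); // constructor arguments such as capacity are unaffected
        names.add("diamond");
        System.out.println(names);
      }
    }

One caveat worth keeping in mind when reviewing hunks like the IdReadWriteLock change that follows: on Java 7 and 8 the diamond operator cannot be used when instantiating an anonymous inner class, which is presumably why anonymous callback implementations, such as the ObjectFactory passed to the WeakObjectPool constructor, are left untouched as context lines even though the enclosing new WeakObjectPool<>(...) switches to the diamond.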
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java index 98ce80d2190..caf3265c8cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java @@ -46,8 +46,7 @@ public class IdReadWriteLock { private static final int NB_CONCURRENT_LOCKS = 1000; // The pool to get entry from, entries are mapped by weak reference to make it able to be // garbage-collected asap - private final WeakObjectPool lockPool = - new WeakObjectPool( + private final WeakObjectPool lockPool = new WeakObjectPool<>( new WeakObjectPool.ObjectFactory() { @Override public ReentrantReadWriteLock createObject(Long id) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java index b0bca000732..9f4b271b27b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JvmVersion.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Stable public abstract class JvmVersion { - private static Set BAD_JVM_VERSIONS = new HashSet(); + private static Set BAD_JVM_VERSIONS = new HashSet<>(); static { BAD_JVM_VERSIONS.add("1.6.0_18"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index f11d38b9266..d7749c2b849 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -128,9 +128,8 @@ public abstract class ModifyRegionUtils { final RegionFillTask task) throws IOException { if (newRegions == null) return null; int regionNumber = newRegions.length; - CompletionService completionService = - new ExecutorCompletionService(exec); - List regionInfos = new ArrayList(); + CompletionService completionService = new ExecutorCompletionService<>(exec); + List regionInfos = new ArrayList<>(); for (final HRegionInfo newRegion : newRegions) { completionService.submit(new Callable() { @Override @@ -193,8 +192,7 @@ public abstract class ModifyRegionUtils { */ public static void editRegions(final ThreadPoolExecutor exec, final Collection regions, final RegionEditTask task) throws IOException { - final ExecutorCompletionService completionService = - new ExecutorCompletionService(exec); + final ExecutorCompletionService completionService = new ExecutorCompletionService<>(exec); for (final HRegionInfo hri: regions) { completionService.submit(new Callable() { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java index 8cb880d3086..47217817acf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MunkresAssignment.java @@ -111,7 +111,7 @@ public class MunkresAssignment { mask = new byte[rows][cols]; rowsCovered = new boolean[rows]; colsCovered = new boolean[cols]; - path = new LinkedList>(); + path = new LinkedList<>(); leastInRow = new float[rows]; leastInRowIndex = new int[rows]; @@ -330,8 +330,7 @@ 
public class MunkresAssignment { // starting from the uncovered primed zero (there is only one). Since // we have already found it, save it as the first node in the path. path.clear(); - path.offerLast(new Pair(zero.getFirst(), - zero.getSecond())); + path.offerLast(new Pair<>(zero.getFirst(), zero.getSecond())); return true; } } @@ -439,7 +438,7 @@ public class MunkresAssignment { private Pair findUncoveredZero() { for (int r = 0; r < rows; r++) { if (leastInRow[r] == 0) { - return new Pair(r, leastInRowIndex[r]); + return new Pair<>(r, leastInRowIndex[r]); } } return null; @@ -476,7 +475,7 @@ public class MunkresAssignment { private Pair starInRow(int r) { for (int c = 0; c < cols; c++) { if (mask[r][c] == STAR) { - return new Pair(r, c); + return new Pair<>(r, c); } } return null; @@ -491,7 +490,7 @@ public class MunkresAssignment { private Pair starInCol(int c) { for (int r = 0; r < rows; r++) { if (mask[r][c] == STAR) { - return new Pair(r, c); + return new Pair<>(r, c); } } return null; @@ -506,7 +505,7 @@ public class MunkresAssignment { private Pair primeInRow(int r) { for (int c = 0; c < cols; c++) { if (mask[r][c] == PRIME) { - return new Pair(r, c); + return new Pair<>(r, c); } } return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 01ee201f99c..ce018da360f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -397,7 +397,7 @@ public class RegionMover extends AbstractHBaseTool { LOG.info("Moving " + regionsToMove.size() + " regions to " + server + " using " + this.maxthreads + " threads.Ack mode:" + this.ack); ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); - List> taskList = new ArrayList>(); + List> taskList = new ArrayList<>(); int counter = 0; while (counter < regionsToMove.size()) { HRegionInfo region = regionsToMove.get(counter); @@ -461,7 +461,7 @@ public class RegionMover extends AbstractHBaseTool { justification="FB is wrong; its size is read") private void unloadRegions(Admin admin, String server, ArrayList regionServers, boolean ack, List movedRegions) throws Exception { - List regionsToMove = new ArrayList();// FindBugs: DLS_DEAD_LOCAL_STORE + List regionsToMove = new ArrayList<>();// FindBugs: DLS_DEAD_LOCAL_STORE regionsToMove = getRegions(this.conf, server); if (regionsToMove.isEmpty()) { LOG.info("No Regions to move....Quitting now"); @@ -481,7 +481,7 @@ public class RegionMover extends AbstractHBaseTool { + regionServers.size() + " servers using " + this.maxthreads + " threads .Ack Mode:" + ack); ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads); - List> taskList = new ArrayList>(); + List> taskList = new ArrayList<>(); int serverIndex = 0; while (counter < regionsToMove.size()) { if (ack) { @@ -636,7 +636,7 @@ public class RegionMover extends AbstractHBaseTool { } private List readRegionsFromFile(String filename) throws IOException { - List regions = new ArrayList(); + List regions = new ArrayList<>(); File f = new File(filename); if (!f.exists()) { return regions; @@ -758,7 +758,7 @@ public class RegionMover extends AbstractHBaseTool { * @return List of servers from the exclude file in format 'hostname:port'. 
*/ private ArrayList readExcludes(String excludeFile) throws IOException { - ArrayList excludeServers = new ArrayList(); + ArrayList excludeServers = new ArrayList<>(); if (excludeFile == null) { return excludeServers; } else { @@ -821,9 +821,8 @@ public class RegionMover extends AbstractHBaseTool { * @throws IOException */ private ArrayList getServers(Admin admin) throws IOException { - ArrayList serverInfo = - new ArrayList(admin.getClusterStatus().getServers()); - ArrayList regionServers = new ArrayList(serverInfo.size()); + ArrayList serverInfo = new ArrayList<>(admin.getClusterStatus().getServers()); + ArrayList regionServers = new ArrayList<>(serverInfo.size()); for (ServerName server : serverInfo) { regionServers.add(server.getServerName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java index c616a251b24..824963028e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java @@ -53,7 +53,7 @@ public class RegionSizeCalculator { /** * Maps each region to its size in bytes. * */ - private final Map sizeMap = new TreeMap(Bytes.BYTES_COMPARATOR); + private final Map sizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable"; private static final long MEGABYTE = 1024L * 1024L; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java index eeef1aea6e9..e07966e4756 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitCalculator.java @@ -62,7 +62,7 @@ public class RegionSplitCalculator { * Invariant: once populated this has 0 entries if empty or at most n+1 values * where n == number of added ranges. */ - private final TreeSet splits = new TreeSet(BYTES_COMPARATOR); + private final TreeSet splits = new TreeSet<>(BYTES_COMPARATOR); /** * This is a map from start key to regions with the same start key. 
@@ -177,11 +177,11 @@ public class RegionSplitCalculator { */ public static List findBigRanges(Collection bigOverlap, int count) { - List bigRanges = new ArrayList(); + List bigRanges = new ArrayList<>(); // The key is the count of overlaps, // The value is a list of ranges that have that many overlaps - TreeMap> overlapRangeMap = new TreeMap>(); + TreeMap> overlapRangeMap = new TreeMap<>(); for (R r: bigOverlap) { // Calculates the # of overlaps for each region // and populates rangeOverlapMap @@ -206,7 +206,7 @@ public class RegionSplitCalculator { Integer key = Integer.valueOf(overlappedRegions); List ranges = overlapRangeMap.get(key); if (ranges == null) { - ranges = new ArrayList(); + ranges = new ArrayList<>(); overlapRangeMap.put(key, ranges); } ranges.add(r); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index ce1b441651a..87ff0106689 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -768,7 +768,7 @@ public class RegionSplitter { Path hbDir = FSUtils.getRootDir(conf); Path tableDir = FSUtils.getTableDir(hbDir, tableName); Path splitFile = new Path(tableDir, "_balancedSplit"); - return new Pair(tableDir, splitFile); + return new Pair<>(tableDir, splitFile); } static LinkedList> getSplits(final Connection connection, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java index e6b746c9081..9cc6d5a19a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java @@ -94,7 +94,7 @@ public abstract class ServerCommandLine extends Configured implements Tool { public static void logProcessInfo(Configuration conf) { // log environment variables unless asked not to if (conf == null || !conf.getBoolean("hbase.envvars.logging.disabled", false)) { - Set skipWords = new HashSet(DEFAULT_SKIP_WORDS); + Set skipWords = new HashSet<>(DEFAULT_SKIP_WORDS); if (conf != null) { String[] confSkipWords = conf.getStrings("hbase.envvars.logging.skipwords"); if (confSkipWords != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java index 62163bffcc0..05e0f4994a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java @@ -49,15 +49,15 @@ public class SortedCopyOnWriteSet implements SortedSet { private volatile SortedSet internalSet; public SortedCopyOnWriteSet() { - this.internalSet = new TreeSet(); + this.internalSet = new TreeSet<>(); } public SortedCopyOnWriteSet(Collection c) { - this.internalSet = new TreeSet(c); + this.internalSet = new TreeSet<>(c); } public SortedCopyOnWriteSet(Comparator comparator) { - this.internalSet = new TreeSet(comparator); + this.internalSet = new TreeSet<>(comparator); } @Override @@ -92,7 +92,7 @@ public class SortedCopyOnWriteSet implements SortedSet { @Override public synchronized boolean add(E e) { - SortedSet newSet = new TreeSet(internalSet); + SortedSet newSet = new TreeSet<>(internalSet); boolean added = newSet.add(e); internalSet = newSet; 
return added; @@ -100,7 +100,7 @@ public class SortedCopyOnWriteSet implements SortedSet { @Override public synchronized boolean remove(Object o) { - SortedSet newSet = new TreeSet(internalSet); + SortedSet newSet = new TreeSet<>(internalSet); boolean removed = newSet.remove(o); internalSet = newSet; return removed; @@ -113,7 +113,7 @@ public class SortedCopyOnWriteSet implements SortedSet { @Override public synchronized boolean addAll(Collection c) { - SortedSet newSet = new TreeSet(internalSet); + SortedSet newSet = new TreeSet<>(internalSet); boolean changed = newSet.addAll(c); internalSet = newSet; return changed; @@ -121,7 +121,7 @@ public class SortedCopyOnWriteSet implements SortedSet { @Override public synchronized boolean retainAll(Collection c) { - SortedSet newSet = new TreeSet(internalSet); + SortedSet newSet = new TreeSet<>(internalSet); boolean changed = newSet.retainAll(c); internalSet = newSet; return changed; @@ -129,7 +129,7 @@ public class SortedCopyOnWriteSet implements SortedSet { @Override public synchronized boolean removeAll(Collection c) { - SortedSet newSet = new TreeSet(internalSet); + SortedSet newSet = new TreeSet<>(internalSet); boolean changed = newSet.removeAll(c); internalSet = newSet; return changed; @@ -139,9 +139,9 @@ public class SortedCopyOnWriteSet implements SortedSet { public synchronized void clear() { Comparator comparator = internalSet.comparator(); if (comparator != null) { - internalSet = new TreeSet(comparator); + internalSet = new TreeSet<>(comparator); } else { - internalSet = new TreeSet(); + internalSet = new TreeSet<>(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java index 39f1f41d548..3f5576e4a5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java @@ -118,7 +118,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized boolean add(E e) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); boolean changed = newList.add(e); if (changed) { Collections.sort(newList, comparator); @@ -129,7 +129,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized boolean remove(Object o) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); // Removals in ArrayList won't break sorting boolean changed = newList.remove(o); list = Collections.unmodifiableList(newList); @@ -143,7 +143,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized boolean addAll(Collection c) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); boolean changed = newList.addAll(c); if (changed) { Collections.sort(newList, comparator); @@ -154,7 +154,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized boolean addAll(int index, Collection c) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); boolean changed = newList.addAll(index, c); if (changed) { Collections.sort(newList, comparator); @@ -165,7 +165,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized boolean removeAll(Collection c) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); // Removals in ArrayList won't break sorting boolean changed = 
newList.removeAll(c); list = Collections.unmodifiableList(newList); @@ -174,7 +174,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized boolean retainAll(Collection c) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); // Removals in ArrayList won't break sorting boolean changed = newList.retainAll(c); list = Collections.unmodifiableList(newList); @@ -193,7 +193,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized E set(int index, E element) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); E result = newList.set(index, element); Collections.sort(list, comparator); list = Collections.unmodifiableList(newList); @@ -202,7 +202,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized void add(int index, E element) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); newList.add(index, element); Collections.sort(list, comparator); list = Collections.unmodifiableList(newList); @@ -210,7 +210,7 @@ public class SortedList implements List, RandomAccess { @Override public synchronized E remove(int index) { - ArrayList newList = new ArrayList(list); + ArrayList newList = new ArrayList<>(list); // Removals in ArrayList won't break sorting E result = newList.remove(index); list = Collections.unmodifiableList(newList); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java index 820da7a747b..82200bd07ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java @@ -66,14 +66,14 @@ public class HFileCorruptionChecker { final FileSystem fs; final CacheConfig cacheConf; final ExecutorService executor; - final Set corrupted = new ConcurrentSkipListSet(); - final Set failures = new ConcurrentSkipListSet(); - final Set quarantined = new ConcurrentSkipListSet(); - final Set missing = new ConcurrentSkipListSet(); - final Set corruptedMobFiles = new ConcurrentSkipListSet(); - final Set failureMobFiles = new ConcurrentSkipListSet(); - final Set missedMobFiles = new ConcurrentSkipListSet(); - final Set quarantinedMobFiles = new ConcurrentSkipListSet(); + final Set corrupted = new ConcurrentSkipListSet<>(); + final Set failures = new ConcurrentSkipListSet<>(); + final Set quarantined = new ConcurrentSkipListSet<>(); + final Set missing = new ConcurrentSkipListSet<>(); + final Set corruptedMobFiles = new ConcurrentSkipListSet<>(); + final Set failureMobFiles = new ConcurrentSkipListSet<>(); + final Set missedMobFiles = new ConcurrentSkipListSet<>(); + final Set quarantinedMobFiles = new ConcurrentSkipListSet<>(); final boolean inQuarantineMode; final AtomicInteger hfilesChecked = new AtomicInteger(); final AtomicInteger mobFilesChecked = new AtomicInteger(); @@ -343,7 +343,7 @@ public class HFileCorruptionChecker { } // Parallelize check at the region dir level - List rdcs = new ArrayList(rds.size() + 1); + List rdcs = new ArrayList<>(rds.size() + 1); List> rdFutures; for (FileStatus rdFs : rds) { @@ -451,14 +451,14 @@ public class HFileCorruptionChecker { * @return the set of check failure file paths after checkTables is called. 
*/ public Collection getFailures() { - return new HashSet(failures); + return new HashSet<>(failures); } /** * @return the set of corrupted file paths after checkTables is called. */ public Collection getCorrupted() { - return new HashSet(corrupted); + return new HashSet<>(corrupted); } /** @@ -472,7 +472,7 @@ public class HFileCorruptionChecker { * @return the set of successfully quarantined paths after checkTables is called. */ public Collection getQuarantined() { - return new HashSet(quarantined); + return new HashSet<>(quarantined); } /** @@ -480,21 +480,21 @@ public class HFileCorruptionChecker { * compaction or flushes. */ public Collection getMissing() { - return new HashSet(missing); + return new HashSet<>(missing); } /** * @return the set of check failure mob file paths after checkTables is called. */ public Collection getFailureMobFiles() { - return new HashSet(failureMobFiles); + return new HashSet<>(failureMobFiles); } /** * @return the set of corrupted mob file paths after checkTables is called. */ public Collection getCorruptedMobFiles() { - return new HashSet(corruptedMobFiles); + return new HashSet<>(corruptedMobFiles); } /** @@ -508,7 +508,7 @@ public class HFileCorruptionChecker { * @return the set of successfully quarantined paths after checkTables is called. */ public Collection getQuarantinedMobFiles() { - return new HashSet(quarantinedMobFiles); + return new HashSet<>(quarantinedMobFiles); } /** @@ -516,7 +516,7 @@ public class HFileCorruptionChecker { * deletion/moves from compaction. */ public Collection getMissedMobFiles() { - return new HashSet(missedMobFiles); + return new HashSet<>(missedMobFiles); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index bdd319db0df..9dd85d86bd9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -115,7 +115,7 @@ public abstract class AbstractFSWALProvider> implemen if (wal == null) { return Collections.emptyList(); } - List wals = new ArrayList(1); + List wals = new ArrayList<>(1); wals.add(wal); return wals; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java index 5b323474f0e..81b1c000d2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java @@ -38,8 +38,7 @@ public class BoundedGroupingStrategy implements RegionGroupingStrategy{ static final String NUM_REGION_GROUPS = "hbase.wal.regiongrouping.numgroups"; static final int DEFAULT_NUM_REGION_GROUPS = 2; - private ConcurrentHashMap groupNameCache = - new ConcurrentHashMap(); + private ConcurrentHashMap groupNameCache = new ConcurrentHashMap<>(); private AtomicInteger counter = new AtomicInteger(0); private String[] groupNames; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 5bee923fe6a..b442f07c1ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -68,7 +68,7 @@ class DisabledWALProvider 
implements WALProvider { @Override public List getWALs() throws IOException { - List wals = new ArrayList(1); + List wals = new ArrayList<>(1); wals.add(disabled); return wals; } @@ -89,8 +89,7 @@ class DisabledWALProvider implements WALProvider { } private static class DisabledWAL implements WAL { - protected final List listeners = - new CopyOnWriteArrayList(); + protected final List listeners = new CopyOnWriteArrayList<>(); protected final Path path; protected final WALCoprocessorHost coprocessorHost; protected final AtomicBoolean closed = new AtomicBoolean(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 25e70d79656..dee36e8f9ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -171,7 +171,7 @@ public class RegionGroupingProvider implements WALProvider { @Override public List getWALs() throws IOException { - List wals = new ArrayList(); + List wals = new ArrayList<>(); for (WALProvider provider : cached.values()) { wals.addAll(provider.getWALs()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index abdc20c2a0b..114715fabec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -93,7 +93,7 @@ public class WALFactory { // The meta updates are written to a different wal. If this // regionserver holds meta regions, then this ref will be non-null. // lazily intialized; most RegionServers don't deal with META - final AtomicReference metaProvider = new AtomicReference(); + final AtomicReference metaProvider = new AtomicReference<>(); /** * Configuration-specified WAL Reader used when a custom reader is requested @@ -368,7 +368,7 @@ public class WALFactory { // untangle the reliance on state in the filesystem. They rely on singleton // WALFactory that just provides Reader / Writers. // For now, first Configuration object wins. 
Practically this just impacts the reader/writer class - private static final AtomicReference singleton = new AtomicReference(); + private static final AtomicReference singleton = new AtomicReference<>(); private static final String SINGLETON_ID = WALFactory.class.getName(); // public only for FSHLog diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java index 276ab3688f6..9a8003a3bbb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java @@ -192,19 +192,19 @@ public class WALKey implements SequenceId, Comparable { public WALKey() { init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null); + new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null); } public WALKey(final NavigableMap replicationScope) { init(null, null, 0L, HConstants.LATEST_TIMESTAMP, - new ArrayList(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope); + new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope); } @VisibleForTesting public WALKey(final byte[] encodedRegionName, final TableName tablename, long logSeqNum, final long now, UUID clusterId) { - List clusterIds = new ArrayList(1); + List clusterIds = new ArrayList<>(1); clusterIds.add(clusterId); init(encodedRegionName, tablename, logSeqNum, now, clusterIds, HConstants.NO_NONCE, HConstants.NO_NONCE, null, null); @@ -543,7 +543,7 @@ public class WALKey implements SequenceId, Comparable { * @return a Map containing data from this key */ public Map toStringMap() { - Map stringMap = new HashMap(); + Map stringMap = new HashMap<>(); stringMap.put("table", tablename); stringMap.put("region", Bytes.toStringBinary(encodedRegionName)); stringMap.put("sequence", getSequenceId()); @@ -684,7 +684,7 @@ public class WALKey implements SequenceId, Comparable { } this.replicationScope = null; if (walKey.getScopesCount() > 0) { - this.replicationScope = new TreeMap(Bytes.BYTES_COMPARATOR); + this.replicationScope = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (FamilyScope scope : walKey.getScopesList()) { byte[] family = (compressionContext == null) ? 
scope.getFamily().toByteArray() : uncompressor.uncompress(scope.getFamily(), compressionContext.familyDict); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index a6fd85fb842..37473e9b30f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -283,10 +283,10 @@ public class WALPrettyPrinter { if (region != null && !((String) txn.get("region")).equals(region)) continue; // initialize list into which we will store atomic actions - List actions = new ArrayList(); + List actions = new ArrayList<>(); for (Cell cell : edit.getCells()) { // add atomic operation to txn - Map op = new HashMap(toStringMap(cell)); + Map op = new HashMap<>(toStringMap(cell)); if (outputValues) op.put("value", Bytes.toStringBinary(CellUtil.cloneValue(cell))); // check row output filter if (row == null || ((String) op.get("row")).equals(row)) { @@ -328,7 +328,7 @@ public class WALPrettyPrinter { } private static Map toStringMap(Cell cell) { - Map stringMap = new HashMap(); + Map stringMap = new HashMap<>(); stringMap.put("row", Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); stringMap.put("family", Bytes.toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(), @@ -339,7 +339,7 @@ public class WALPrettyPrinter { stringMap.put("timestamp", cell.getTimestamp()); stringMap.put("vlen", cell.getValueLength()); if (cell.getTagsLength() > 0) { - List tagsString = new ArrayList(); + List tagsString = new ArrayList<>(); Iterator tagsIterator = CellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 2cf2c6b69a9..d87c71bf971 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -158,11 +158,10 @@ public class WALSplitter { protected boolean distributedLogReplay; // Map encodedRegionName -> lastFlushedSequenceId - protected Map lastFlushedSequenceIds = new ConcurrentHashMap(); + protected Map lastFlushedSequenceIds = new ConcurrentHashMap<>(); // Map encodedRegionName -> maxSeqIdInStores - protected Map> regionMaxSeqIdInStores = - new ConcurrentHashMap>(); + protected Map> regionMaxSeqIdInStores = new ConcurrentHashMap<>(); // Failed region server that the wal file being split belongs to protected String failedServerName = ""; @@ -245,7 +244,7 @@ public class WALSplitter { FileSystem fs, Configuration conf, final WALFactory factory) throws IOException { final FileStatus[] logfiles = SplitLogManager.getFileList(conf, Collections.singletonList(logDir), null); - List splits = new ArrayList(); + List splits = new ArrayList<>(); if (logfiles != null && logfiles.length > 0) { for (FileStatus logfile: logfiles) { WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, null, null, @@ -331,7 +330,7 @@ public class WALSplitter { } } else if (sequenceIdChecker != null) { RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region); - Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) { 
maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), storeSeqId.getSequenceId()); @@ -447,8 +446,8 @@ public class WALSplitter { private static void finishSplitLogFile(Path rootdir, Path oldLogDir, Path logPath, Configuration conf) throws IOException { - List processedLogs = new ArrayList(); - List corruptedLogs = new ArrayList(); + List processedLogs = new ArrayList<>(); + List corruptedLogs = new ArrayList<>(); FileSystem fs; fs = rootdir.getFileSystem(conf); if (ZKSplitLog.isCorrupted(rootdir, logPath.getName(), fs)) { @@ -614,7 +613,7 @@ public class WALSplitter { */ public static NavigableSet getSplitEditFilesSorted(final FileSystem fs, final Path regiondir) throws IOException { - NavigableSet filesSorted = new TreeSet(); + NavigableSet filesSorted = new TreeSet<>(); Path editsdir = getRegionDirRecoveredEditsDir(regiondir); if (!fs.exists(editsdir)) return filesSorted; @@ -872,7 +871,7 @@ public class WALSplitter { public static class PipelineController { // If an exception is thrown by one of the other threads, it will be // stored here. - AtomicReference thrown = new AtomicReference(); + AtomicReference thrown = new AtomicReference<>(); // Wait/notify for when data has been produced by the writer thread, // consumed by the reader thread, or an exception occurred @@ -906,13 +905,12 @@ public class WALSplitter { public static class EntryBuffers { PipelineController controller; - Map buffers = - new TreeMap(Bytes.BYTES_COMPARATOR); + Map buffers = new TreeMap<>(Bytes.BYTES_COMPARATOR); /* Track which regions are currently in the middle of writing. We don't allow an IO thread to pick up bytes from a region if we're already writing data for that region in a different IO thread. */ - Set currentlyWriting = new TreeSet(Bytes.BYTES_COMPARATOR); + Set currentlyWriting = new TreeSet<>(Bytes.BYTES_COMPARATOR); long totalBuffered = 0; long maxHeapUsage; @@ -1027,7 +1025,7 @@ public class WALSplitter { RegionEntryBuffer(TableName tableName, byte[] region) { this.tableName = tableName; this.encodedRegionName = region; - this.entryBuffer = new LinkedList(); + this.entryBuffer = new LinkedList<>(); } long appendEntry(Entry entry) { @@ -1148,7 +1146,7 @@ public class WALSplitter { /* Set of regions which we've decided should not output edits */ protected final Set blacklistedRegions = Collections - .synchronizedSet(new TreeSet(Bytes.BYTES_COMPARATOR)); + .synchronizedSet(new TreeSet<>(Bytes.BYTES_COMPARATOR)); protected boolean closeAndCleanCompleted = false; @@ -1360,7 +1358,7 @@ public class WALSplitter { private List close() throws IOException { Preconditions.checkState(!closeAndCleanCompleted); - final List paths = new ArrayList(); + final List paths = new ArrayList<>(); final List thrown = Lists.newArrayList(); ThreadPoolExecutor closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() { @@ -1372,8 +1370,7 @@ public class WALSplitter { return t; } }); - CompletionService completionService = - new ExecutorCompletionService(closeThreadPool); + CompletionService completionService = new ExecutorCompletionService<>(closeThreadPool); for (final Map.Entry writersEntry : writers.entrySet()) { if (LOG.isTraceEnabled()) { LOG.trace("Submitting close of " + ((WriterAndPath)writersEntry.getValue()).p); @@ -1558,7 +1555,7 @@ public class WALSplitter { } // Create the array list for the cells that aren't filtered. // We make the assumption that most cells will be kept. 
- ArrayList keptCells = new ArrayList(logEntry.getEdit().getCells().size()); + ArrayList keptCells = new ArrayList<>(logEntry.getEdit().getCells().size()); for (Cell cell : logEntry.getEdit().getCells()) { if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { keptCells.add(cell); @@ -1639,7 +1636,7 @@ public class WALSplitter { */ @Override public Map getOutputCounts() { - TreeMap ret = new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); synchronized (writers) { for (Map.Entry entry : writers.entrySet()) { ret.put(entry.getKey(), entry.getValue().editsWritten); @@ -1705,8 +1702,7 @@ public class WALSplitter { private final Set recoveredRegions = Collections.synchronizedSet(new HashSet()); private final Map writers = new ConcurrentHashMap<>(); // online encoded region name -> region location map - private final Map onlineRegions = - new ConcurrentHashMap(); + private final Map onlineRegions = new ConcurrentHashMap<>(); private final Map tableNameToHConnectionMap = Collections .synchronizedMap(new TreeMap()); @@ -1859,7 +1855,7 @@ public class WALSplitter { + encodeRegionNameStr); lastFlushedSequenceIds.put(encodeRegionNameStr, Long.MAX_VALUE); if (nonExistentTables == null) { - nonExistentTables = new TreeSet(); + nonExistentTables = new TreeSet<>(); } nonExistentTables.add(table); this.skippedEdits.incrementAndGet(); @@ -1906,7 +1902,7 @@ public class WALSplitter { Collections.synchronizedList(new ArrayList>()); serverToBufferQueueMap.put(locKey, queue); } - queue.add(new Pair(loc, entry)); + queue.add(new Pair<>(loc, entry)); } // store regions we have recovered so far addToRecoveredRegions(loc.getRegionInfo().getEncodedName()); @@ -1957,7 +1953,7 @@ public class WALSplitter { loc.getRegionInfo().getEncodedName()); if (ids != null) { lastFlushedSequenceId = ids.getLastFlushedSequenceId(); - Map storeIds = new TreeMap(Bytes.BYTES_COMPARATOR); + Map storeIds = new TreeMap<>(Bytes.BYTES_COMPARATOR); List maxSeqIdInStores = ids.getStoreSequenceIdList(); for (StoreSequenceId id : maxSeqIdInStores) { storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId()); @@ -2102,7 +2098,7 @@ public class WALSplitter { if (hasEditsInDisablingOrDisabledTables) { splits = logRecoveredEditsOutputSink.finishWritingAndClose(); } else { - splits = new ArrayList(); + splits = new ArrayList<>(); } // returns an empty array in order to keep interface same as old way return splits; @@ -2316,13 +2312,13 @@ public class WALSplitter { if (entry == null) { // return an empty array - return new ArrayList(); + return new ArrayList<>(); } long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? 
entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber(); int count = entry.getAssociatedCellCount(); - List mutations = new ArrayList(); + List mutations = new ArrayList<>(); Cell previousCell = null; Mutation m = null; WALKey key = null; @@ -2369,7 +2365,7 @@ public class WALSplitter { if (logEntry != null) { org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = entry.getKey(); - List clusterIds = new ArrayList(walKeyProto.getClusterIdsCount()); + List clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount()); for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) { clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java index e6d3b7fc5da..32e08620ed1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java @@ -50,7 +50,7 @@ public class DrainingServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(DrainingServerTracker.class); private ServerManager serverManager; - private final NavigableSet drainingServers = new TreeSet(); + private final NavigableSet drainingServers = new TreeSet<>(); private Abortable abortable; public DrainingServerTracker(ZooKeeperWatcher watcher, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java index ff730736e4a..7dea2696259 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java @@ -80,9 +80,9 @@ public class MiniZooKeeperCluster { this.started = false; this.configuration = configuration; activeZKServerIndex = -1; - zooKeeperServers = new ArrayList(); - clientPortList = new ArrayList(); - standaloneServerFactoryList = new ArrayList(); + zooKeeperServers = new ArrayList<>(); + clientPortList = new ArrayList<>(); + standaloneServerFactoryList = new ArrayList<>(); connectionTimeout = configuration.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", DEFAULT_CONNECTION_TIMEOUT); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java index 19d2d001194..69cd2331762 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java @@ -48,8 +48,7 @@ import org.apache.zookeeper.KeeperException; @InterfaceAudience.Private public class RegionServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); - private NavigableMap regionServers = - new TreeMap(); + private NavigableMap regionServers = new TreeMap<>(); private ServerManager serverManager; private Server server; @@ -154,7 +153,7 @@ public class RegionServerTracker extends ZooKeeperListener { */ public List getOnlineServers() { synchronized (this.regionServers) { - return new ArrayList(this.regionServers.keySet()); + return new ArrayList<>(this.regionServers.keySet()); } } } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java index 455cfd262de..b96924d49c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java @@ -36,7 +36,7 @@ import java.util.List; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ZKServerTool { public static ServerName[] readZKNodes(Configuration conf) { - List hosts = new LinkedList(); + List hosts = new LinkedList<>(); String quorum = conf.get(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST); String[] values = quorum.split(","); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 897dad7eb28..0f8a289d8d1 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -372,11 +372,11 @@ if ( fqtn != null ) { long totalStoreFileCount = 0; long totalMemSize = 0; String urlRegionServer = null; - Map regDistribution = new TreeMap(); - Map primaryRegDistribution = new TreeMap(); + Map regDistribution = new TreeMap<>(); + Map primaryRegDistribution = new TreeMap<>(); List regions = r.getAllRegionLocations(); - Map regionsToLoad = new LinkedHashMap(); - Map regionsToServer = new LinkedHashMap(); + Map regionsToLoad = new LinkedHashMap<>(); + Map regionsToServer = new LinkedHashMap<>(); for (HRegionLocation hriEntry : regions) { HRegionInfo regionInfo = hriEntry.getRegionInfo(); ServerName addr = hriEntry.getServerName(); @@ -448,7 +448,7 @@ ShowDetailName&Start/End Key results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMore = true; while (hasMore) { hasMore = scanner.next(results); @@ -2367,7 +2367,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { throws IOException { Table meta = getConnection().getTable(TableName.META_TABLE_NAME); Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); - List newRegions = new ArrayList(startKeys.length); + List newRegions = new ArrayList<>(startKeys.length); MetaTableAccessor .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED); // add custom ones @@ -2426,7 +2426,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public List getMetaTableRows() throws IOException { // TODO: Redo using MetaTableAccessor class Table t = getConnection().getTable(TableName.META_TABLE_NAME); - List rows = new ArrayList(); + List rows = new ArrayList<>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { LOG.info("getMetaTableRows: row -> " + @@ -2446,7 +2446,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public List getMetaTableRows(TableName tableName) throws IOException { // TODO: Redo using MetaTableAccessor. 
Table t = getConnection().getTable(TableName.META_TABLE_NAME); - List rows = new ArrayList(); + List rows = new ArrayList<>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { HRegionInfo info = MetaTableAccessor.getHRegionInfo(result); @@ -3219,7 +3219,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public static NavigableSet getAllOnlineRegions(MiniHBaseCluster cluster) throws IOException { - NavigableSet online = new TreeSet(); + NavigableSet online = new TreeSet<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { try { for (HRegionInfo region : @@ -3391,7 +3391,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { // readpoint 0. 0); - List result = new ArrayList(); + List result = new ArrayList<>(); scanner.next(result); if (!result.isEmpty()) { // verify that we are on the row we want: @@ -3601,7 +3601,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { private static final int MAX_RANDOM_PORT = 0xfffe; /** A set of ports that have been claimed using {@link #randomFreePort()}. */ - private final Set takenRandomPorts = new HashSet(); + private final Set takenRandomPorts = new HashSet<>(); private final Random random; private final AvailablePortChecker portChecker; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java index e5334bfb3a8..268f79cfa18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java @@ -41,7 +41,7 @@ public class HTestConst { public static final byte[] DEFAULT_CF_BYTES = Bytes.toBytes(DEFAULT_CF_STR); public static final Set DEFAULT_CF_STR_SET = - Collections.unmodifiableSet(new HashSet( + Collections.unmodifiableSet(new HashSet<>( Arrays.asList(new String[] { DEFAULT_CF_STR }))); public static final String DEFAULT_ROW_STR = "MyTestRow"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java index 42e28117bac..9a1515bac2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java @@ -68,7 +68,7 @@ public class MetaMockingUtil { */ public static Result getMetaTableRowResult(HRegionInfo region, final ServerName sn, HRegionInfo splita, HRegionInfo splitb) throws IOException { - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); if (region != null) { kvs.add(new KeyValue( region.getRegionName(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index a8887d4320a..55529c64b39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -672,7 +672,7 @@ public class MiniHBaseCluster extends HBaseCluster { } public List getRegions(TableName tableName) { - List ret = new ArrayList(); + List ret = new ArrayList<>(); for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); for (Region region : hrs.getOnlineRegionsLocalContext()) { @@ -770,7 +770,7 @@ public class MiniHBaseCluster extends HBaseCluster { } public List findRegionsForTable(TableName tableName) { - ArrayList ret = new ArrayList(); 
+ ArrayList ret = new ArrayList<>(); for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); for (Region region : hrs.getOnlineRegions(tableName)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 5e2a70fd7cf..7740e66ae19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -63,9 +63,9 @@ import com.google.protobuf.Service; */ public class MockRegionServerServices implements RegionServerServices { protected static final Log LOG = LogFactory.getLog(MockRegionServerServices.class); - private final Map regions = new HashMap(); + private final Map regions = new HashMap<>(); private final ConcurrentSkipListMap rit = - new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); private HFileSystem hfs = null; private final Configuration conf; private ZooKeeperWatcher zkw = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index 7e251e77ecd..cf07b42a129 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -39,7 +39,7 @@ public abstract class MultithreadedTestUtil { private Throwable err = null; private boolean stopped = false; private int threadDoneCount = 0; - private Set testThreads = new HashSet(); + private Set testThreads = new HashSet<>(); public TestContext(Configuration configuration) { this.conf = configuration; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index b7e4a7101db..f8345b120a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -146,7 +146,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024); private static final TestOptions DEFAULT_OPTS = new TestOptions(); - private static Map COMMANDS = new TreeMap(); + private static Map COMMANDS = new TreeMap<>(); private static final Path PERF_EVAL_DIR = new Path("performance_evaluation"); static { @@ -536,7 +536,7 @@ public class PerformanceEvaluation extends Configured implements Tool { Path inputFile = new Path(inputDir, JOB_INPUT_FILENAME); PrintStream out = new PrintStream(fs.create(inputFile)); // Make input random. - Map m = new TreeMap(); + Map m = new TreeMap<>(); Hash h = MurmurHash.getInstance(); int perClientRows = (opts.totalRows / opts.numClientThreads); try { @@ -1311,7 +1311,7 @@ public class PerformanceEvaluation extends Configured implements Tool { protected Pair generateStartAndStopRows(int maxRange) { int start = this.rand.nextInt(Integer.MAX_VALUE) % opts.totalRows; int stop = start + maxRange; - return new Pair(format(start), format(stop)); + return new Pair<>(format(start), format(stop)); } @Override @@ -1375,7 +1375,7 @@ public class PerformanceEvaluation extends Configured implements Tool { consistency = options.replicas == DEFAULT_OPTS.replicas ? 
null : Consistency.TIMELINE; if (opts.multiGet > 0) { LOG.info("MultiGet enabled. Sending GETs in batches of " + opts.multiGet + "."); - this.gets = new ArrayList(opts.multiGet); + this.gets = new ArrayList<>(opts.multiGet); } } @@ -2207,7 +2207,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } try { - LinkedList argv = new LinkedList(); + LinkedList argv = new LinkedList<>(); argv.addAll(Arrays.asList(args)); TestOptions opts = parseOpts(argv); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java index 3809a134fdf..e2350e81185 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java @@ -67,7 +67,7 @@ public class PerformanceEvaluationCommons { public static void concurrentReads(final Runnable r) { final int count = 1; long now = System.currentTimeMillis(); - List threads = new ArrayList(count); + List threads = new ArrayList<>(count); for (int i = 0; i < count; i++) { threads.add(new Thread(r, "concurrentRead-" + i)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java index 06b98f79aa0..23ca57ff34e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java @@ -39,7 +39,7 @@ public class TestCheckTestClasses { */ @Test public void checkClasses() throws Exception { - List> badClasses = new java.util.ArrayList>(); + List> badClasses = new java.util.ArrayList<>(); ClassTestFinder classFinder = new ClassTestFinder(); for (Class c : classFinder.findClasses(false)) { if (ClassTestFinder.getCategoryAnnotations(c).length == 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java index 5cc7ed90073..8d19c1b32b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java @@ -158,7 +158,7 @@ public class TestGlobalMemStoreSize { } private List getOnlineRegionServers() { - List list = new ArrayList(); + List list = new ArrayList<>(); for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) { if (rst.getRegionServer().isOnline()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java index 2329fc274f1..06cfdcffefa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java @@ -50,7 +50,7 @@ public class TestHDFSBlocksDistribution { public class MockHDFSBlocksDistribution extends HDFSBlocksDistribution { public Map getHostAndWeights() { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); map.put("test", new HostAndWeight(null, 100)); return map; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index 870ebb31511..9915f9953e2 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -87,7 +87,7 @@ public class TestMetaTableAccessorNoCluster { public void testGetHRegionInfo() throws IOException { assertNull(MetaTableAccessor.getHRegionInfo(new Result())); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); Result r = Result.create(kvs); assertNull(MetaTableAccessor.getHRegionInfo(r)); @@ -141,7 +141,7 @@ public class TestMetaTableAccessorNoCluster { // show. We will know if they happened or not because we will ask // mockito at the end of this test to verify that scan was indeed // called the wanted number of times. - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); final byte [] rowToVerify = Bytes.toBytes("rowToVerify"); kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, @@ -152,7 +152,7 @@ public class TestMetaTableAccessorNoCluster { kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode()))); - final List cellScannables = new ArrayList(1); + final List cellScannables = new ArrayList<>(1); cellScannables.add(Result.create(kvs)); final ScanResponse.Builder builder = ScanResponse.newBuilder(); for (CellScannable result : cellScannables) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index 57248b6d0d5..f3ea81411af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -274,7 +274,7 @@ public class TestPartialResultsFromClientSide { int iterationCount = 0; while (oneShotResult != null && oneShotResult.rawCells() != null) { - List aggregatePartialCells = new ArrayList(); + List aggregatePartialCells = new ArrayList<>(); do { partialResult = partialScanner.next(); assertTrue("Partial Result is null. 
iteration: " + iterationCount, partialResult != null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 03c55242229..283d79d2a0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -231,7 +231,7 @@ public class TestRegionRebalancing { } private List getOnlineRegionServers() { - List list = new ArrayList(); + List list = new ArrayList<>(); for (JVMClusterUtil.RegionServerThread rst : UTIL.getHBaseCluster().getRegionServerThreads()) { if (rst.getRegionServer().isOnline()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java index 953756e0887..ad63cc80b0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java @@ -280,7 +280,7 @@ public class TestServerSideScanMetricsFromClientSide { filter = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS[0], CompareOp.NOT_EQUAL, VALUE); testRowsFilteredMetric(baseScan, filter, ROWS.length); - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new RowFilter(CompareOp.EQUAL, new BinaryComparator(ROWS[0]))); filters.add(new RowFilter(CompareOp.EQUAL, new BinaryComparator(ROWS[3]))); int numberOfMatchingRowFilters = filters.size(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java index d4f7cdd2e12..c4abd895c8c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java @@ -222,7 +222,7 @@ public class TestStochasticBalancerJmxMetrics extends BalancerTestBase { target = new ObjectName("Hadoop", pairs); MBeanInfo beanInfo = mb.getMBeanInfo(target); - Set existingAttrs = new HashSet(); + Set existingAttrs = new HashSet<>(); for (MBeanAttributeInfo attrInfo : beanInfo.getAttributes()) { existingAttrs.add(attrInfo.getName()); } @@ -255,7 +255,7 @@ public class TestStochasticBalancerJmxMetrics extends BalancerTestBase { * Given the tables and functions, return metrics names that should exist in JMX */ private Set getExpectedJmxMetrics(String[] tableNames, String[] functionNames) { - Set ret = new HashSet(); + Set ret = new HashSet<>(); for (String tableName : tableNames) { ret.add(StochasticLoadBalancer.composeAttributeName(tableName, "Overall")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index 284251fb5fd..1acb842bc61 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -193,7 +193,7 @@ public class TestHFileArchiving { FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null); Assert.assertNotNull("No files in the region directory", regionFiles); if (LOG.isDebugEnabled()) { - List files = new ArrayList(); + List files = new 
ArrayList<>(); for (FileStatus file : regionFiles) { files.add(file.getPath()); } @@ -269,7 +269,7 @@ public class TestHFileArchiving { private void assertArchiveFiles(FileSystem fs, List storeFiles, long timeout) throws IOException { long end = System.currentTimeMillis() + timeout; Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()); - List archivedFiles = new ArrayList(); + List archivedFiles = new ArrayList<>(); // We have to ensure that the DeleteTableHandler is finished. HBaseAdmin.deleteXXX() can return before all files // are archived. We should fix HBASE-5487 and fix synchronous operations from admin. @@ -434,7 +434,7 @@ public class TestHFileArchiving { return true; } }); - return recurseOnFiles(fs, files, new ArrayList()); + return recurseOnFiles(fs, files, new ArrayList<>()); } /** Recursively lookup all the file names under the file[] array **/ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 64139ee5312..fc56ebd4aff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -80,7 +80,7 @@ public class TestZooKeeperTableArchiveClient { private static final byte[] TEST_FAM = Bytes.toBytes("fam"); private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); private static ZKTableArchiveClient archivingClient; - private final List toCleanup = new ArrayList(); + private final List toCleanup = new ArrayList<>(); private static ClusterConnection CONNECTION; private static RegionServerServices rss; @@ -178,7 +178,7 @@ public class TestZooKeeperTableArchiveClient { // create the region HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM); HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(region); when(rss.getOnlineRegions()).thenReturn(regions); final CompactedHFilesDischarger compactionCleaner = @@ -231,7 +231,7 @@ public class TestZooKeeperTableArchiveClient { // create the region HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM); HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(region); when(rss.getOnlineRegions()).thenReturn(regions); final CompactedHFilesDischarger compactionCleaner = @@ -241,7 +241,7 @@ public class TestZooKeeperTableArchiveClient { // create the another table that we don't archive hcd = new HColumnDescriptor(TEST_FAM); HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd); - regions = new ArrayList(); + regions = new ArrayList<>(); regions.add(otherRegion); when(rss.getOnlineRegions()).thenReturn(regions); final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, @@ -388,7 +388,7 @@ public class TestZooKeeperTableArchiveClient { return null; } - List allFiles = new ArrayList(); + List allFiles = new ArrayList<>(); for (FileStatus file : files) { if (file.isDirectory()) { List subFiles = getAllFiles(fs, file.getPath()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index f6942108c22..7b69db447ca 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -1002,7 +1002,7 @@ public class TestAdmin1 { // insert rows into column families. The number of rows that have values // in a specific column family is decided by rowCounts[familyIndex] for (int index = 0; index < familyNames.length; index++) { - ArrayList puts = new ArrayList(rowCounts[index]); + ArrayList puts = new ArrayList<>(rowCounts[index]); for (int i = 0; i < rowCounts[index]; i++) { byte[] k = Bytes.toBytes(i); Put put = new Put(k); @@ -1143,7 +1143,7 @@ public class TestAdmin1 { } while (oldRegions.size() != 9); //3 regions * 3 replicas // write some data to the table Table ht = TEST_UTIL.getConnection().getTable(tableName); - List puts = new ArrayList(); + List puts = new ArrayList<>(); byte[] qualifier = "c".getBytes(); Put put = new Put(new byte[]{(byte)'1'}); put.addColumn(cf, qualifier, "100".getBytes()); @@ -1295,7 +1295,7 @@ public class TestAdmin1 { byte[] q1 = Bytes.toBytes("q1"); byte[] v1 = Bytes.toBytes("v1"); p.addColumn(Bytes.toBytes(fn), q1, v1); - List puts = new ArrayList(2); + List puts = new ArrayList<>(2); puts.add(p); p = new Put(Bytes.toBytes("rep1_rk")); p.addColumn(Bytes.toBytes(fn1), q1, v1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index eb15d919c49..001440137fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -776,7 +776,7 @@ public class TestAdmin2 { // Drain all region servers. Collection clusterServers = admin.getClusterStatus().getServers(); - drainingServers = new ArrayList(); + drainingServers = new ArrayList<>(); for (ServerName server : clusterServers) { drainingServers.add(server); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index ab09c5ef346..e7d7f0af065 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -231,7 +231,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { } catch (InterruptedException e) { } } - List cacheList = new ArrayList(); + List cacheList = new ArrayList<>(); Iterator iterator = cache.iterator(); // evict all the blocks while (iterator.hasNext()) { @@ -379,7 +379,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { Thread evictorThread = new Thread() { @Override public void run() { - List cacheList = new ArrayList(); + List cacheList = new ArrayList<>(); Iterator iterator = cache.iterator(); // evict all the blocks while (iterator.hasNext()) { @@ -416,7 +416,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { } assertEquals("Count the rows", count, 2); iterator = cache.iterator(); - List newCacheList = new ArrayList(); + List newCacheList = new ArrayList<>(); while (iterator.hasNext()) { CachedBlock next = iterator.next(); BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index f453662953d..023095ff104 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -1329,7 +1329,7 @@ public class TestBlockEvictionFromClient { private static class MultiGetThread extends Thread { private final Table table; - private final List gets = new ArrayList(); + private final List gets = new ArrayList<>(); public MultiGetThread(Table table) { this.table = table; } @@ -1565,7 +1565,7 @@ public class TestBlockEvictionFromClient { static final AtomicInteger countOfGets = new AtomicInteger(0); static final AtomicBoolean waitForGets = new AtomicBoolean(false); static final AtomicBoolean throwException = new AtomicBoolean(false); - private static final AtomicReference cdl = new AtomicReference( + private static final AtomicReference cdl = new AtomicReference<>( new CountDownLatch(0)); @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java index c1cb0a60b7f..62ceca3d8b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java @@ -96,7 +96,7 @@ public class TestClientOperationInterrupt { final AtomicInteger badEx = new AtomicInteger(0); final AtomicInteger noInt = new AtomicInteger(0); final AtomicInteger done = new AtomicInteger(0); - List threads = new ArrayList(); + List threads = new ArrayList<>(); final int nbThread = 100; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java index 69729f04cea..c3e4a2862cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java @@ -42,7 +42,7 @@ public class TestConnectionUtils { retries[i] = ConnectionUtils.getPauseTime(baseTime, 0); } - Set retyTimeSet = new TreeSet(); + Set retyTimeSet = new TreeSet<>(); for (long l : retries) { /*make sure that there is some jitter but only 1%*/ assertTrue(l >= baseTime); @@ -62,7 +62,7 @@ public class TestConnectionUtils { long minTimeExpected = (long) (basePause * 0.75f); int testTries = 100; - Set timeSet = new TreeSet(); + Set timeSet = new TreeSet<>(); for (int i = 0; i < testTries; i++) { long withJitter = ConnectionUtils.addJitter(basePause, 0.5f); assertTrue(withJitter >= minTimeExpected); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java index 2aed0ff0672..465bdfb0426 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java @@ -163,7 +163,7 @@ public class TestFastFail { final AtomicInteger numBlockedWorkers = new AtomicInteger(0); final AtomicInteger numPreemptiveFastFailExceptions = new AtomicInteger(0); - List> futures = new ArrayList>(); + List> futures = new ArrayList<>(); for (int i = 0; i < nThreads; i++) { futures.add(service.submit(new Callable() { /** diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index aab586d998f..b1a0d3cab8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -190,7 +190,7 @@ public class TestFromClientSide { @Test public void testDuplicateAppend() throws Exception { HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(name.getMethodName()); - Map kvs = new HashMap(); + Map kvs = new HashMap<>(); kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs); TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close(); @@ -2299,7 +2299,7 @@ public class TestFromClientSide { result = ht.get(get); assertTrue(result.size() == 1); } - ArrayList deletes = new ArrayList(); + ArrayList deletes = new ArrayList<>(); for (int i = 0; i < 10; i++) { byte [] bytes = Bytes.toBytes(i); delete = new Delete(bytes); @@ -4707,7 +4707,7 @@ public class TestFromClientSide { final Object waitLock = new Object(); ExecutorService executorService = Executors.newFixedThreadPool(numVersions); - final AtomicReference error = new AtomicReference(null); + final AtomicReference error = new AtomicReference<>(null); for (int versions = numVersions; versions < numVersions * 2; versions++) { final int versionsCopy = versions; executorService.submit(new Callable() { @@ -5315,7 +5315,7 @@ public class TestFromClientSide { private List getRegionsInRange(TableName tableName, byte[] startKey, byte[] endKey) throws IOException { - List regionsInRange = new ArrayList(); + List regionsInRange = new ArrayList<>(); byte[] currentKey = startKey; final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName);) { @@ -6237,7 +6237,7 @@ public class TestFromClientSide { HRegionLocator locator = (HRegionLocator) admin.getConnection().getRegionLocator(htd.getTableName()); for (int regionReplication = 1; regionReplication < 4; regionReplication++) { - List regionLocations = new ArrayList(); + List regionLocations = new ArrayList<>(); // mock region locations coming from meta with multiple replicas for (HRegionInfo region : regions) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 36808221362..7f44a2aeda5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -338,7 +338,7 @@ public class TestFromClientSide3 { put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); - List gets = new ArrayList(); + List gets = new ArrayList<>(); gets.add(new Get(ROW)); gets.add(null); gets.add(new Get(ANOTHERROW)); @@ -432,7 +432,7 @@ public class TestFromClientSide3 { put.addColumn(FAMILY, QUALIFIER, VALUE); table.put (put); - List gets = new ArrayList(); + List gets = new ArrayList<>(); gets.add(new Get(ANOTHERROW)); gets.add(new Get(Bytes.add(ROW, new byte[] { 0x00 }))); gets.add(new Get(ROW)); @@ -450,7 +450,7 @@ public class TestFromClientSide3 { put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); - gets = new ArrayList(); + gets = new ArrayList<>(); 
gets.add(new Get(new byte[] { 0x00 })); gets.add(new Get(new byte[] { 0x00, 0x00 })); results = table.existsAll(gets); @@ -462,7 +462,7 @@ public class TestFromClientSide3 { put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); - gets = new ArrayList(); + gets = new ArrayList<>(); gets.add(new Get(new byte[] { (byte) 0xff })); gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff })); gets.add(new Get(new byte[] { (byte) 0xff, (byte) 0xff, (byte) 0xff })); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java index 8d93a0aea79..1eb83d9329a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java @@ -170,7 +170,7 @@ public class TestHBaseAdminNoCluster { testMasterOperationIsRetried(new MethodCaller() { @Override public void call(Admin admin) throws Exception { - admin.getTableDescriptorsByTableName(new ArrayList()); + admin.getTableDescriptorsByTableName(new ArrayList<>()); } @Override public void verify(MasterKeepAliveConnection masterAdmin, int count) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 84758286a54..70be7fadcce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -244,7 +244,7 @@ public class TestHCM { public void testClusterConnection() throws IOException { ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS, - new SynchronousQueue(), + new SynchronousQueue<>(), Threads.newDaemonThreadFactory("test-hcm")); Connection con1 = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); @@ -748,7 +748,7 @@ public class TestHCM { // 4 steps: ready=0; doGets=1; mustStop=2; stopped=3 final AtomicInteger step = new AtomicInteger(0); - final AtomicReference failed = new AtomicReference(null); + final AtomicReference failed = new AtomicReference<>(null); Thread t = new Thread("testConnectionCloseThread") { @Override public void run() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java index 26764d37d3d..5c47de0c46f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java @@ -131,7 +131,7 @@ public class TestHTableMultiplexer { } // MultiPut case - List multiput = new ArrayList(); + List multiput = new ArrayList<>(); for (int i = 0; i < NUM_REGIONS; i++) { byte [] row = endRows[i]; if (row == null || row.length <= 0) continue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java index 9a2aa3d122a..999760de72f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java @@ -200,7 +200,7 @@ public class TestIllegalTableDescriptor { } private static class ListAppender extends AppenderSkeleton { - private 
final List messages = new ArrayList(); + private final List messages = new ArrayList<>(); @Override protected void append(LoggingEvent event) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java index 3d7e1dc09ff..44068122557 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java @@ -94,7 +94,7 @@ public class TestIncrementsFromClientSide { @Test public void testDuplicateIncrement() throws Exception { HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName())); - Map kvs = new HashMap(); + Map kvs = new HashMap<>(); kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000"); hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs); TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java index 1f6dc98be67..43a2e773925 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java @@ -68,7 +68,7 @@ public class TestIntraRowPagination { Result result; boolean toLog = true; - List kvListExp = new ArrayList(); + List kvListExp = new ArrayList<>(); int storeOffset = 1; int storeLimit = 3; @@ -91,8 +91,8 @@ public class TestIntraRowPagination { scan.setRowOffsetPerColumnFamily(storeOffset); scan.setMaxResultsPerColumnFamily(storeLimit); RegionScanner scanner = region.getScanner(scan); - List kvListScan = new ArrayList(); - List results = new ArrayList(); + List kvListScan = new ArrayList<>(); + List results = new ArrayList<>(); while (scanner.next(results) || !results.isEmpty()) { kvListScan.addAll(results); results.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index 1209d25ddbe..a3bcc76685d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -110,7 +110,7 @@ public class TestMultiParallel { // not a multiple of the number of regions int numKeys = (int) ((float) starterKeys.length * 10.33F); - List keys = new ArrayList(); + List keys = new ArrayList<>(); for (int i = 0; i < numKeys; i++) { int kIdx = i % starterKeys.length; byte[] k = starterKeys[kIdx]; @@ -155,7 +155,7 @@ public class TestMultiParallel { try (Table t = connection.getTable(TEST_TABLE, executor)) { List puts = constructPutRequests(); // creates a Put for every region t.batch(puts, null); - HashSet regionservers = new HashSet(); + HashSet regionservers = new HashSet<>(); try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) { for (Row r : puts) { HRegionLocation location = locator.getRegionLocation(r.getRow()); @@ -180,7 +180,7 @@ public class TestMultiParallel { table.batch(puts, null); // create a list of gets and run it - List gets = new ArrayList(); + List gets = new ArrayList<>(); for (byte[] k : KEYS) { Get get = new Get(k); 
get.addColumn(BYTES_FAMILY, QUALIFIER); @@ -190,7 +190,7 @@ public class TestMultiParallel { table.batch(gets, multiRes); // Same gets using individual call API - List singleRes = new ArrayList(); + List singleRes = new ArrayList<>(); for (Row get : gets) { singleRes.add(table.get((Get) get)); } @@ -214,7 +214,7 @@ public class TestMultiParallel { LOG.info("test=testBadFam"); Table table = UTIL.getConnection().getTable(TEST_TABLE); - List actions = new ArrayList(); + List actions = new ArrayList<>(); Put p = new Put(Bytes.toBytes("row1")); p.addColumn(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value")); actions.add(p); @@ -368,7 +368,7 @@ public class TestMultiParallel { validateSizeAndEmpty(results, KEYS.length); // Deletes - List deletes = new ArrayList(); + List deletes = new ArrayList<>(); for (int i = 0; i < KEYS.length; i++) { Delete delete = new Delete(KEYS[i]); delete.addFamily(BYTES_FAMILY); @@ -399,7 +399,7 @@ public class TestMultiParallel { validateSizeAndEmpty(results, KEYS.length); // Deletes - ArrayList deletes = new ArrayList(); + ArrayList deletes = new ArrayList<>(); for (int i = 0; i < KEYS.length; i++) { Delete delete = new Delete(KEYS[i]); delete.addFamily(BYTES_FAMILY); @@ -422,7 +422,7 @@ public class TestMultiParallel { LOG.info("test=testBatchWithManyColsInOneRowGetAndPut"); Table table = UTIL.getConnection().getTable(TEST_TABLE); - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (int i = 0; i < 100; i++) { Put put = new Put(ONE_ROW); byte[] qual = Bytes.toBytes("column" + i); @@ -436,7 +436,7 @@ public class TestMultiParallel { validateSizeAndEmpty(results, 100); // get the data back and validate that it is correct - List gets = new ArrayList(); + List gets = new ArrayList<>(); for (int i = 0; i < 100; i++) { Get get = new Get(ONE_ROW); byte[] qual = Bytes.toBytes("column" + i); @@ -478,7 +478,7 @@ public class TestMultiParallel { Append a = new Append(ONE_ROW); a.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("def")); a.add(BYTES_FAMILY, QUAL4, Bytes.toBytes("xyz")); - List actions = new ArrayList(); + List actions = new ArrayList<>(); actions.add(inc); actions.add(a); @@ -604,7 +604,7 @@ public class TestMultiParallel { // Batch: get, get, put(new col), delete, get, get of put, get of deleted, // put - List actions = new ArrayList(); + List actions = new ArrayList<>(); byte[] qual2 = Bytes.toBytes("qual2"); byte[] val2 = Bytes.toBytes("putvalue2"); @@ -693,7 +693,7 @@ public class TestMultiParallel { private void validateLoadedData(Table table) throws IOException { // get the data back and validate that it is correct LOG.info("Validating data on " + table); - List gets = new ArrayList(); + List gets = new ArrayList<>(); for (byte[] k : KEYS) { Get get = new Get(k); get.addColumn(BYTES_FAMILY, QUALIFIER); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 22e88da292e..becb2eb1679 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -377,12 +377,12 @@ public class TestReplicaWithCluster { final int numRows = 10; final byte[] qual = Bytes.toBytes("qual"); final byte[] val = Bytes.toBytes("val"); - final List> famPaths = new ArrayList>(); + final List> famPaths = new ArrayList<>(); for (HColumnDescriptor col : hdt.getColumnFamilies()) { Path hfile = new 
Path(dir, col.getNameAsString()); TestHRegionServerBulkLoad.createHFile(HTU.getTestFileSystem(), hfile, col.getName(), qual, val, numRows); - famPaths.add(new Pair(col.getName(), hfile.toString())); + famPaths.add(new Pair<>(col.getName(), hfile.toString())); } // bulk load HFiles diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index 7cbb5add08c..7b4442bc41a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -103,7 +103,7 @@ public class TestReplicasClient { static final AtomicBoolean slowDownNext = new AtomicBoolean(false); static final AtomicInteger countOfNext = new AtomicInteger(0); private static final AtomicReference cdl = - new AtomicReference(new CountDownLatch(0)); + new AtomicReference<>(new CountDownLatch(0)); Random r = new Random(); public SlowMeCopro() { } @@ -530,7 +530,7 @@ public class TestReplicasClient { public void testCancelOfMultiGet() throws Exception { openRegion(hriSecondary); try { - List puts = new ArrayList(2); + List puts = new ArrayList<>(2); byte[] b1 = Bytes.toBytes("testCancelOfMultiGet" + 0); Put p = new Put(b1); p.addColumn(f, b1, b1); @@ -552,7 +552,7 @@ public class TestReplicasClient { // Make primary slowdown SlowMeCopro.getCdl().set(new CountDownLatch(1)); - List gets = new ArrayList(); + List gets = new ArrayList<>(); Get g = new Get(b1); g.setCheckExistenceOnly(true); g.setConsistency(Consistency.TIMELINE); @@ -762,7 +762,7 @@ public class TestReplicasClient { Iterator iter = scanner.iterator(); // Maps of row keys that we have seen so far - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); // Tracked metrics int rowCount = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java index ec6e020f32f..3190fb9c9a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java @@ -304,7 +304,7 @@ public class TestRestoreSnapshotFromClient { private Set getFamiliesFromFS(final TableName tableName) throws IOException { MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); - Set families = new HashSet(); + Set families = new HashSet<>(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) { for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 42fecfb5ab1..6f400932b40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -151,7 +151,7 @@ public class TestScannersFromClientSide { scanner = ht.getScanner(scan); // c4:4, c5:5, c6:6, c7:7 - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[4], 4, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILY, 
QUALIFIERS[5], 5, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[6], 6, VALUE)); @@ -166,14 +166,14 @@ public class TestScannersFromClientSide { scanner = ht.getScanner(scan); // First batch: c4:4, c5:5 - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[4], 4, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[5], 5, VALUE)); result = scanner.next(); verifyResult(result, kvListExp, toLog, "Testing first batch of scan"); // Second batch: c6:6, c7:7 - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[6], 6, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[7], 7, VALUE)); result = scanner.next(); @@ -204,7 +204,7 @@ public class TestScannersFromClientSide { byte[] cellValue = Bytes.createMaxByteArray(cellSize); Put put; - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (int row = 0; row < ROWS.length; row++) { put = new Put(ROWS[row]); for (int qual = 0; qual < QUALIFIERS.length; qual++) { @@ -244,7 +244,7 @@ public class TestScannersFromClientSide { Table ht = TEST_UTIL.createTable(tableName, FAMILY); Put put; - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (int row = 0; row < ROWS.length; row++) { put = new Put(ROWS[row]); for (int qual = 0; qual < QUALIFIERS.length; qual++) { @@ -327,7 +327,7 @@ public class TestScannersFromClientSide { boolean toLog = true; List kvListExp; - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); // Insert one CF for row[0] put = new Put(ROW); for (int i=0; i < 10; i++) { @@ -344,7 +344,7 @@ public class TestScannersFromClientSide { get = new Get(ROW); get.setMaxResultsPerColumnFamily(2); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[0], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[1], 1, VALUE)); verifyResult(result, kvListExp, toLog, "Testing basic setMaxResults"); @@ -355,7 +355,7 @@ public class TestScannersFromClientSide { get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true)); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[2], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[3], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[4], 1, VALUE)); @@ -383,7 +383,7 @@ public class TestScannersFromClientSide { get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); //Exp: CF1:q0, ..., q9, CF2: q0, q1, q10, q11, ..., q19 for (int i=0; i < 10; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[i], 1, VALUE)); @@ -401,7 +401,7 @@ public class TestScannersFromClientSide { get.setMaxResultsPerColumnFamily(3); get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, null, true)); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); for (int i=2; i < 5; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE)); } @@ -417,7 +417,7 @@ public class TestScannersFromClientSide { get.setMaxResultsPerColumnFamily(7); get.setFilter(new ColumnPrefixFilter(QUALIFIERS[1])); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[1], 1, VALUE)); kvListExp.add(new 
KeyValue(ROW, FAMILIES[1], QUALIFIERS[1], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[1], 1, VALUE)); @@ -448,7 +448,7 @@ public class TestScannersFromClientSide { boolean toLog = true; List kvListExp, kvListScan; - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); for (int r=0; r < ROWS.length; r++) { put = new Put(ROWS[r]); @@ -467,7 +467,7 @@ public class TestScannersFromClientSide { scan = new Scan(); scan.setMaxResultsPerColumnFamily(4); ResultScanner scanner = ht.getScanner(scan); - kvListScan = new ArrayList(); + kvListScan = new ArrayList<>(); while ((result = scanner.next()) != null) { for (Cell kv : result.listCells()) { kvListScan.add(kv); @@ -498,7 +498,7 @@ public class TestScannersFromClientSide { List kvListExp; // Insert one CF for row - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); put = new Put(ROW); for (int i=0; i < 10; i++) { KeyValue kv = new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE); @@ -519,7 +519,7 @@ public class TestScannersFromClientSide { get = new Get(ROW); get.setRowOffsetPerColumnFamily(20); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); verifyResult(result, kvListExp, toLog, "Testing offset > #kvs"); //offset + maxResultPerCF @@ -527,7 +527,7 @@ public class TestScannersFromClientSide { get.setRowOffsetPerColumnFamily(4); get.setMaxResultsPerColumnFamily(5); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); for (int i=4; i < 9; i++) { kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[i], 1, VALUE)); } @@ -540,7 +540,7 @@ public class TestScannersFromClientSide { get.setFilter(new ColumnRangeFilter(QUALIFIERS[2], true, QUALIFIERS[5], true)); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[3], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[4], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[5], 1, VALUE)); @@ -563,7 +563,7 @@ public class TestScannersFromClientSide { get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); result = ht.get(get); - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); //Exp: CF1:q4, q5, CF2: q4, q5 kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[4], 1, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[5], 1, VALUE)); @@ -644,7 +644,7 @@ public class TestScannersFromClientSide { } // c0:0, c1:1 - kvListExp = new ArrayList(); + kvListExp = new ArrayList<>(); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[0], 0, VALUE)); kvListExp.add(new KeyValue(ROW, FAMILY, QUALIFIERS[1], 1, VALUE)); result = scanner.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 3d8ee558aec..f2f3b26ed6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -337,7 +337,7 @@ public class TestSnapshotFromClient { LOG.debug(table2Snapshot1 + " completed."); List listTableSnapshots = admin.listTableSnapshots("test.*", ".*"); - List listTableSnapshotNames = new ArrayList(); + List listTableSnapshotNames = new ArrayList<>(); assertEquals(3, listTableSnapshots.size()); for (SnapshotDescription s : listTableSnapshots) { listTableSnapshotNames.add(s.getName()); @@ -379,7 
+379,7 @@ public class TestSnapshotFromClient { List listTableSnapshots = admin.listTableSnapshots("test.*", "Table1.*"); - List listTableSnapshotNames = new ArrayList(); + List listTableSnapshotNames = new ArrayList<>(); assertEquals(2, listTableSnapshots.size()); for (SnapshotDescription s : listTableSnapshots) { listTableSnapshotNames.add(s.getName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java index 55086b5c2a3..99c4340a147 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java @@ -193,7 +193,7 @@ public class TestSnapshotMetadata { final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); // restore the snapshot into a cloned table and examine the output - List familiesList = new ArrayList(); + List familiesList = new ArrayList<>(); Collections.addAll(familiesList, families); // Create a snapshot in which all families are empty @@ -262,8 +262,8 @@ public class TestSnapshotMetadata { // populate it with data final byte[] familyForUpdate = BLOCKSIZE_FAM; - List familiesWithDataList = new ArrayList(); - List emptyFamiliesList = new ArrayList(); + List familiesWithDataList = new ArrayList<>(); + List emptyFamiliesList = new ArrayList<>(); if (addData) { Table original = UTIL.getConnection().getTable(originalTableName); UTIL.loadTable(original, familyForUpdate); // family arbitrarily chosen diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java index 5c487d7d600..0a1fafe398c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java @@ -145,7 +145,7 @@ public class TestTimestampsFilter { // Request an empty list of versions using the Timestamps filter; // Should return none. - kvs = getNVersions(ht, FAMILY, 2, 2, new ArrayList()); + kvs = getNVersions(ht, FAMILY, 2, 2, new ArrayList<>()); assertEquals(0, kvs == null? 
0: kvs.length); // @@ -192,7 +192,7 @@ public class TestTimestampsFilter { p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 3, Bytes.toBytes("value4-3")); ht.put(p); - ArrayList timestamps = new ArrayList(); + ArrayList timestamps = new ArrayList<>(); timestamps.add(new Long(3)); TimestampsFilter filter = new TimestampsFilter(timestamps); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index 481f311e538..f092a48afbc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -255,7 +255,7 @@ public class TestReplicationAdmin { // append table column family: f1 of t3 to replication tableCFs.clear(); - tableCFs.put(tableName3, new ArrayList()); + tableCFs.put(tableName3, new ArrayList<>()); tableCFs.get(tableName3).add("f1"); admin.appendPeerTableCFs(ID_ONE, tableCFs); result = ReplicationSerDeHelper.parseTableCFsFromConfig(admin.getPeerTableCFs(ID_ONE)); @@ -269,7 +269,7 @@ public class TestReplicationAdmin { assertEquals("f1", result.get(tableName3).get(0)); tableCFs.clear(); - tableCFs.put(tableName4, new ArrayList()); + tableCFs.put(tableName4, new ArrayList<>()); tableCFs.get(tableName4).add("f1"); tableCFs.get(tableName4).add("f2"); admin.appendPeerTableCFs(ID_ONE, tableCFs); @@ -289,10 +289,10 @@ public class TestReplicationAdmin { // append "table5" => [], then append "table5" => ["f1"] tableCFs.clear(); - tableCFs.put(tableName5, new ArrayList()); + tableCFs.put(tableName5, new ArrayList<>()); admin.appendPeerTableCFs(ID_ONE, tableCFs); tableCFs.clear(); - tableCFs.put(tableName5, new ArrayList()); + tableCFs.put(tableName5, new ArrayList<>()); tableCFs.get(tableName5).add("f1"); admin.appendPeerTableCFs(ID_ONE, tableCFs); result = ReplicationSerDeHelper.parseTableCFsFromConfig(admin.getPeerTableCFs(ID_ONE)); @@ -303,11 +303,11 @@ public class TestReplicationAdmin { // append "table6" => ["f1"], then append "table6" => [] tableCFs.clear(); - tableCFs.put(tableName6, new ArrayList()); + tableCFs.put(tableName6, new ArrayList<>()); tableCFs.get(tableName6).add("f1"); admin.appendPeerTableCFs(ID_ONE, tableCFs); tableCFs.clear(); - tableCFs.put(tableName6, new ArrayList()); + tableCFs.put(tableName6, new ArrayList<>()); admin.appendPeerTableCFs(ID_ONE, tableCFs); result = ReplicationSerDeHelper.parseTableCFsFromConfig(admin.getPeerTableCFs(ID_ONE)); assertEquals(6, result.size()); @@ -339,7 +339,7 @@ public class TestReplicationAdmin { tableCFs.clear(); tableCFs.put(tableName1, null); - tableCFs.put(tableName2, new ArrayList()); + tableCFs.put(tableName2, new ArrayList<>()); tableCFs.get(tableName2).add("cf1"); admin.setPeerTableCFs(ID_ONE, tableCFs); try { @@ -360,7 +360,7 @@ public class TestReplicationAdmin { try { tableCFs.clear(); - tableCFs.put(tableName1, new ArrayList()); + tableCFs.put(tableName1, new ArrayList<>()); tableCFs.get(tableName1).add("f1"); admin.removePeerTableCFs(ID_ONE, tableCFs); assertTrue(false); @@ -382,13 +382,13 @@ public class TestReplicationAdmin { } catch (ReplicationException e) { } tableCFs.clear(); - tableCFs.put(tableName2, new ArrayList()); + tableCFs.put(tableName2, new ArrayList<>()); tableCFs.get(tableName2).add("cf1"); admin.removePeerTableCFs(ID_ONE, tableCFs); assertNull(admin.getPeerTableCFs(ID_ONE)); tableCFs.clear(); - 
tableCFs.put(tableName4, new ArrayList()); + tableCFs.put(tableName4, new ArrayList<>()); admin.setPeerTableCFs(ID_ONE, tableCFs); admin.removePeerTableCFs(ID_ONE, tableCFs); assertNull(admin.getPeerTableCFs(ID_ONE)); @@ -407,7 +407,7 @@ public class TestReplicationAdmin { admin.peerAdded(ID_ONE); rpc = admin.getPeerConfig(ID_ONE); - Set namespaces = new HashSet(); + Set namespaces = new HashSet<>(); namespaces.add(ns1); namespaces.add(ns2); rpc.setNamespaces(namespaces); @@ -448,7 +448,7 @@ public class TestReplicationAdmin { admin.updatePeerConfig(ID_ONE, rpc); rpc = admin.getPeerConfig(ID_ONE); Map> tableCfs = new HashMap<>(); - tableCfs.put(tableName1, new ArrayList()); + tableCfs.put(tableName1, new ArrayList<>()); rpc.setTableCFsMap(tableCfs); try { admin.updatePeerConfig(ID_ONE, rpc); @@ -460,7 +460,7 @@ public class TestReplicationAdmin { rpc = admin.getPeerConfig(ID_ONE); tableCfs.clear(); - tableCfs.put(tableName2, new ArrayList()); + tableCfs.put(tableName2, new ArrayList<>()); rpc.setTableCFsMap(tableCfs); admin.updatePeerConfig(ID_ONE, rpc); rpc = admin.getPeerConfig(ID_ONE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java index 24889ad3251..b44ecbf07b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java @@ -183,8 +183,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { } assertFalse("Table should not exists in the peer cluster", admin2.isTableAvailable(TestReplicationBase.tableName)); - Map> tableCfs = - new HashMap>(); + Map> tableCfs = new HashMap<>(); tableCfs.put(tableName, null); try { adminExt.setPeerTableCFs(peerId, tableCfs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java index acc3fca678b..12a229d250b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java @@ -75,11 +75,9 @@ public class TestConstraints { @Test public void testReadWriteWithConf() throws Throwable { HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName())); - Constraints.add( - desc, - new Pair, Configuration>( - CheckConfigurationConstraint.class, CheckConfigurationConstraint - .getConfiguration())); + Constraints.add(desc, + new Pair<>(CheckConfigurationConstraint.class, + CheckConfigurationConstraint.getConfiguration())); List c = Constraints.getConstraints(desc, this .getClass().getClassLoader()); @@ -88,7 +86,7 @@ public class TestConstraints { assertEquals(CheckConfigurationConstraint.class, c.get(0).getClass()); // check to make sure that we overwrite configurations - Constraints.add(desc, new Pair, Configuration>( + Constraints.add(desc, new Pair<>( CheckConfigurationConstraint.class, new Configuration(false))); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index 21d98619756..422c54bf8a2 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -349,7 +349,7 @@ public class TestCoprocessorInterface { RegionScanner scanner = region.getCoprocessorHost().postScannerOpen(s, region.getScanner(s)); assertTrue(scanner instanceof CustomScanner); // this would throw an exception before HBASE-4197 - scanner.next(new ArrayList()); + scanner.next(new ArrayList<>()); HBaseTestingUtility.closeRegionAndWAL(region); Coprocessor c = region.getCoprocessorHost(). diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java index 04d8c8c1053..9f20ba23281 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java @@ -221,7 +221,7 @@ public class TestHTableWrapper { // multiple deletes: Delete[] deletes = new Delete[] { new Delete(ROW_D), new Delete(ROW_E) }; - hTableInterface.delete(new ArrayList(Arrays.asList(deletes))); + hTableInterface.delete(new ArrayList<>(Arrays.asList(deletes))); checkRowsValues(new byte[][] { ROW_D, ROW_E }, new byte[][] { null, null }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java index b75fc792867..1ed0008bfea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java @@ -92,7 +92,7 @@ public class TestOpenTableInCoprocessor { long keepAliveTime = 60; ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, - new SynchronousQueue(), Threads.newDaemonThreadFactory("hbase-table")); + new SynchronousQueue<>(), Threads.newDaemonThreadFactory("hbase-table")); pool.allowCoreThreadTimeOut(true); return pool; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java index fb87ff63830..63d7544a163 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java @@ -112,7 +112,7 @@ public class TestRegionObserverBypass { EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge()); Table t = util.getConnection().getTable(tableName); - List puts = new ArrayList(); + List puts = new ArrayList<>(); Put p = new Put(row1); p.addColumn(dummy, dummy, dummy); puts.add(p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 2e0db449ea8..7b4cc40d87d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -424,7 +424,7 @@ public class TestRegionObserverInterface { @Override public boolean next(List results, ScannerContext scannerContext) 
throws IOException { - List internalResults = new ArrayList(); + List internalResults = new ArrayList<>(); boolean hasMore; do { hasMore = scanner.next(internalResults, scannerContext); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 2ab91c1ea79..5fb5421933c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -181,8 +181,7 @@ public class TestWALObserver { HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE)); final HTableDescriptor htd = createBasic3FamilyHTD(Bytes .toString(TEST_TABLE)); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -272,8 +271,7 @@ public class TestWALObserver { final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE)); final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE)); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -328,8 +326,7 @@ public class TestWALObserver { WALEdit edit = new WALEdit(); long now = EnvironmentEdgeManager.currentTime(); final int countPerFamily = 1000; - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (HColumnDescriptor hcd : htd.getFamilies()) { scopes.put(hcd.getName(), 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java index a195ec70aba..d624c93392c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java @@ -56,9 +56,8 @@ import com.google.common.collect.Sets; @Category({MasterTests.class, SmallTests.class}) public class TestFavoredNodeAssignmentHelper { - private static List servers = new ArrayList(); - private static Map> rackToServers = new HashMap>(); + private static List servers = new ArrayList<>(); + private static Map> rackToServers = new HashMap<>(); private static RackManager rackManager = Mockito.mock(RackManager.class); // Some tests have randomness, so we run them multiple times @@ -76,7 +75,7 @@ public class TestFavoredNodeAssignmentHelper { if (i < 10) { Mockito.when(rackManager.getRack(server)).thenReturn("rack1"); if (rackToServers.get("rack1") == null) { - List servers = new ArrayList(); + List servers = new ArrayList<>(); rackToServers.put("rack1", servers); } rackToServers.get("rack1").add(server); @@ -84,7 +83,7 @@ public class TestFavoredNodeAssignmentHelper { if (i >= 10 && i < 20) { Mockito.when(rackManager.getRack(server)).thenReturn("rack2"); if (rackToServers.get("rack2") == null) { - List servers = new ArrayList(); + List servers = new ArrayList<>(); rackToServers.put("rack2", servers); } rackToServers.get("rack2").add(server); @@ -92,7 +91,7 @@ public class TestFavoredNodeAssignmentHelper { if (i >= 20 && i < 
30) { Mockito.when(rackManager.getRack(server)).thenReturn("rack3"); if (rackToServers.get("rack3") == null) { - List servers = new ArrayList(); + List servers = new ArrayList<>(); rackToServers.put("rack3", servers); } rackToServers.get("rack3").add(server); @@ -105,7 +104,7 @@ public class TestFavoredNodeAssignmentHelper { // work with from any given rack // Return a rondom 'count' number of servers from 'rack' private static List getServersFromRack(Map rackToServerCount) { - List chosenServers = new ArrayList(); + List chosenServers = new ArrayList<>(); for (Map.Entry entry : rackToServerCount.entrySet()) { List servers = rackToServers.get(entry.getKey()); for (int i = 0; i < entry.getValue(); i++) { @@ -119,7 +118,7 @@ public class TestFavoredNodeAssignmentHelper { public void testSmallCluster() { // Test the case where we cannot assign favored nodes (because the number // of nodes in the cluster is too less) - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 2); List servers = getServersFromRack(rackToServerCount); FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, @@ -158,7 +157,7 @@ public class TestFavoredNodeAssignmentHelper { public void testSecondaryAndTertiaryPlacementWithSingleRack() { // Test the case where there is a single rack and we need to choose // Primary/Secondary/Tertiary from a single rack. - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 10); // have lots of regions to test with Triple, FavoredNodeAssignmentHelper, List> @@ -184,7 +183,7 @@ public class TestFavoredNodeAssignmentHelper { public void testSecondaryAndTertiaryPlacementWithSingleServer() { // Test the case where we have a single node in the cluster. In this case // the primary can be assigned but the secondary/tertiary would be null - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); Triple, FavoredNodeAssignmentHelper, List> primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount); @@ -202,7 +201,7 @@ public class TestFavoredNodeAssignmentHelper { public void testSecondaryAndTertiaryPlacementWithMultipleRacks() { // Test the case where we have multiple racks and the region servers // belong to multiple racks - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 10); rackToServerCount.put("rack2", 10); @@ -233,7 +232,7 @@ public class TestFavoredNodeAssignmentHelper { public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() { // Test the case where we have two racks but with less than two servers in each // We will not have enough machines to select secondary/tertiary - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 1); rackToServerCount.put("rack2", 1); Triple, FavoredNodeAssignmentHelper, List> @@ -257,7 +256,7 @@ public class TestFavoredNodeAssignmentHelper { // racks than what the primary is on. 
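For reference, the change these hunks apply is purely syntactic: the constructor's type arguments are dropped and inferred from the declared type. A minimal standalone sketch with plain JDK types (the class and variable names below are illustrative only, not code from this patch):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondSketch {
  public static void main(String[] args) {
    // Before: the type arguments are repeated on the right-hand side.
    Map<String, List<String>> rackToServersVerbose = new HashMap<String, List<String>>();

    // After (diamond operator, Java 7+): the compiler infers <String, List<String>>
    // from the declared type of the variable.
    Map<String, List<String>> rackToServers = new HashMap<>();

    // With Java 8's improved target typing the diamond can also appear directly in
    // an argument position, as the patch does with put(..., new ArrayList<>()).
    rackToServers.put("rack1", new ArrayList<>());
    rackToServers.get("rack1").add("server1,1234,5678");

    rackToServersVerbose.putAll(rackToServers);
    System.out.println(rackToServersVerbose);
  }
}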
But if the other rack doesn't have // enough nodes to have both secondary/tertiary RSs, the tertiary is placed // on the same rack as the primary server is on - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", 2); rackToServerCount.put("rack2", 1); Triple, FavoredNodeAssignmentHelper, List> @@ -290,7 +289,7 @@ public class TestFavoredNodeAssignmentHelper { new HashMap>(); helper.initialize(); // create regions - List regions = new ArrayList(regionCount); + List regions = new ArrayList<>(regionCount); for (int i = 0; i < regionCount; i++) { HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName()), Bytes.toBytes(i), Bytes.toBytes(i + 1)); @@ -298,13 +297,12 @@ public class TestFavoredNodeAssignmentHelper { } // place the regions helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); - return new Triple, FavoredNodeAssignmentHelper, List> - (primaryRSMap, helper, regions); + return new Triple<>(primaryRSMap, helper, regions); } private void primaryRSPlacement(int regionCount, Map primaryRSMap, int firstRackSize, int secondRackSize, int thirdRackSize) { - Map rackToServerCount = new HashMap(); + Map rackToServerCount = new HashMap<>(); rackToServerCount.put("rack1", firstRackSize); rackToServerCount.put("rack2", secondRackSize); rackToServerCount.put("rack3", thirdRackSize); @@ -315,11 +313,10 @@ public class TestFavoredNodeAssignmentHelper { assertTrue(helper.canPlaceFavoredNodes()); - Map> assignmentMap = - new HashMap>(); - if (primaryRSMap == null) primaryRSMap = new HashMap(); + Map> assignmentMap = new HashMap<>(); + if (primaryRSMap == null) primaryRSMap = new HashMap<>(); // create some regions - List regions = new ArrayList(regionCount); + List regions = new ArrayList<>(regionCount); for (int i = 0; i < regionCount; i++) { HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"), Bytes.toBytes(i), Bytes.toBytes(i + 1)); @@ -354,11 +351,11 @@ public class TestFavoredNodeAssignmentHelper { //Verify the ordering was as expected by inserting the racks and regions //in sorted maps. 
The keys being the racksize and numregions; values are //the relative positions of the racksizes and numregions respectively - SortedMap rackMap = new TreeMap(); + SortedMap rackMap = new TreeMap<>(); rackMap.put(firstRackSize, 1); rackMap.put(secondRackSize, 2); rackMap.put(thirdRackSize, 3); - SortedMap regionMap = new TreeMap(); + SortedMap regionMap = new TreeMap<>(); regionMap.put(regionsOnRack1, 1); regionMap.put(regionsOnRack2, 2); regionMap.put(regionsOnRack3, 3); @@ -390,7 +387,7 @@ public class TestFavoredNodeAssignmentHelper { helper.initialize(); assertTrue(helper.canPlaceFavoredNodes()); - List regions = new ArrayList(20); + List regions = new ArrayList<>(20); for (int i = 0; i < 20; i++) { HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName()), Bytes.toBytes(i), Bytes.toBytes(i + 1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index 1d241404fa1..9b71d454665 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -70,13 +70,12 @@ public class TestColumnPrefixFilter { List columns = generateRandomWords(10000, "column"); long maxTimestamp = 2; - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); - Map> prefixMap = new HashMap>(); + Map> prefixMap = new HashMap<>(); - prefixMap.put("p", new ArrayList()); - prefixMap.put("s", new ArrayList()); + prefixMap.put("p", new ArrayList<>()); + prefixMap.put("s", new ArrayList<>()); String valueString = "ValueString"; @@ -108,7 +107,7 @@ public class TestColumnPrefixFilter { scan.setFilter(filter); InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results)) ; assertEquals(prefixMap.get(s).size(), results.size()); @@ -133,13 +132,12 @@ public class TestColumnPrefixFilter { List columns = generateRandomWords(10000, "column"); long maxTimestamp = 2; - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); - Map> prefixMap = new HashMap>(); + Map> prefixMap = new HashMap<>(); - prefixMap.put("p", new ArrayList()); - prefixMap.put("s", new ArrayList()); + prefixMap.put("p", new ArrayList<>()); + prefixMap.put("s", new ArrayList<>()); String valueString = "ValueString"; @@ -174,7 +172,7 @@ public class TestColumnPrefixFilter { scan.setFilter(filterList); InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results)) ; assertEquals(prefixMap.get(s).size(), results.size()); @@ -187,7 +185,7 @@ public class TestColumnPrefixFilter { } List generateRandomWords(int numberOfWords, String suffix) { - Set wordSet = new HashSet(); + Set wordSet = new HashSet<>(); for (int i = 0; i < numberOfWords; i++) { int lengthOfWords = (int) (Math.random()*2) + 1; char[] wordChar = new char[lengthOfWords]; @@ -202,7 +200,7 @@ public class TestColumnPrefixFilter { } wordSet.add(word); } - List wordList = new ArrayList(wordSet); + List wordList = new ArrayList<>(wordSet); return wordList; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java index 04377b0240a..f03a4f0e9d8 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java @@ -171,18 +171,18 @@ public class TestColumnRangeFilter { long maxTimestamp = 2; List columns = generateRandomWords(20000, 8); - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); - Map> rangeMap = new HashMap>(); + Map> rangeMap = new HashMap<>(); rangeMap.put(new StringRange(null, true, "b", false), - new ArrayList()); + new ArrayList<>()); rangeMap.put(new StringRange("p", true, "q", false), - new ArrayList()); + new ArrayList<>()); rangeMap.put(new StringRange("r", false, "s", true), - new ArrayList()); + new ArrayList<>()); rangeMap.put(new StringRange("z", false, null, false), - new ArrayList()); + new ArrayList<>()); String valueString = "ValueString"; for (String row : rows) { @@ -216,7 +216,7 @@ public class TestColumnRangeFilter { s.isEndInclusive()); scan.setFilter(filter); ResultScanner scanner = ht.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); LOG.info("scan column range: " + s.toString()); long timeBeforeScan = System.currentTimeMillis(); @@ -245,7 +245,7 @@ public class TestColumnRangeFilter { } List generateRandomWords(int numberOfWords, int maxLengthOfWords) { - Set wordSet = new HashSet(); + Set wordSet = new HashSet<>(); for (int i = 0; i < numberOfWords; i++) { int lengthOfWords = (int) (Math.random() * maxLengthOfWords) + 1; char[] wordChar = new char[lengthOfWords]; @@ -255,7 +255,7 @@ public class TestColumnRangeFilter { String word = new String(wordChar); wordSet.add(word); } - List wordList = new ArrayList(wordSet); + List wordList = new ArrayList<>(wordSet); return wordList; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index 1b00ae86b94..704441afaad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -125,7 +125,7 @@ public class TestDependentColumnFilter { } private List makeTestVals() { - List testVals = new ArrayList(); + List testVals = new ArrayList<>(); testVals.add(new KeyValue(ROWS[0], FAMILIES[0], QUALIFIER, STAMPS[0], BAD_VALS[0])); testVals.add(new KeyValue(ROWS[0], FAMILIES[0], QUALIFIER, STAMPS[1], BAD_VALS[1])); testVals.add(new KeyValue(ROWS[0], FAMILIES[1], QUALIFIER, STAMPS[1], BAD_VALS[2])); @@ -147,7 +147,7 @@ public class TestDependentColumnFilter { private void verifyScan(Scan s, long expectedRows, long expectedCells) throws IOException { InternalScanner scanner = this.region.getScanner(s); - List results = new ArrayList(); + List results = new ArrayList<>(); int i = 0; int cells = 0; for (boolean done = true; done; i++) { @@ -231,7 +231,7 @@ public class TestDependentColumnFilter { @Test public void testFilterDropping() throws Exception { Filter filter = new DependentColumnFilter(FAMILIES[0], QUALIFIER); - List accepted = new ArrayList(); + List accepted = new ArrayList<>(); for(Cell val : testVals) { if(filter.filterKeyValue(val) == ReturnCode.INCLUDE) { accepted.add(val); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index e4af75f3862..a403c247183 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -275,7 +275,7 @@ public class TestFilter { // reseek to row three. scanner.reseek(ROWS_THREE[1]); - List results = new ArrayList(); + List results = new ArrayList<>(); // the results should belong to ROWS_THREE[1] scanner.next(results); @@ -285,7 +285,7 @@ public class TestFilter { } // again try to reseek to a value before ROWS_THREE[1] scanner.reseek(ROWS_ONE[1]); - results = new ArrayList(); + results = new ArrayList<>(); // This time no seek would have been done to ROWS_ONE[1] scanner.next(results); for (Cell keyValue : results) { @@ -511,7 +511,7 @@ public class TestFilter { InternalScanner scanner = this.region.getScanner(s); int scannerCounter = 0; while (true) { - boolean isMoreResults = scanner.next(new ArrayList()); + boolean isMoreResults = scanner.next(new ArrayList<>()); scannerCounter++; if (scannerCounter >= pageSize) { @@ -540,7 +540,7 @@ public class TestFilter { InternalScanner scanner = this.region.getScanner(s); while (true) { - ArrayList values = new ArrayList(); + ArrayList values = new ArrayList<>(); boolean isMoreResults = scanner.next(values); if (!isMoreResults || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) { @@ -575,7 +575,7 @@ public class TestFilter { InternalScanner scanner = this.region.getScanner(s); int scannerCounter = 0; while (true) { - boolean isMoreResults = scanner.next(new ArrayList()); + boolean isMoreResults = scanner.next(new ArrayList<>()); scannerCounter++; if (scannerCounter >= pageSize) { @@ -629,7 +629,7 @@ public class TestFilter { s.setFilter(filter); InternalScanner scanner = this.region.getScanner(s); - ArrayList values = new ArrayList(); + ArrayList values = new ArrayList<>(); scanner.next(values); assertTrue("All rows should be filtered out", values.isEmpty()); } @@ -652,7 +652,7 @@ public class TestFilter { InternalScanner scanner = this.region.getScanner(s); while (true) { - ArrayList values = new ArrayList(); + ArrayList values = new ArrayList<>(); boolean isMoreResults = scanner.next(values); if (!isMoreResults || !Bytes.toString(CellUtil.cloneRow(values.get(0))).startsWith(prefix)) { assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining()); @@ -681,7 +681,7 @@ public class TestFilter { InternalScanner scanner = this.region.getScanner(s); while (true) { - ArrayList values = new ArrayList(); + ArrayList values = new ArrayList<>(); boolean isMoreResults = scanner.next(values); assertTrue("The WhileMatchFilter should now filter all remaining", filter.filterAllRemaining()); if (!isMoreResults) { @@ -1370,7 +1370,7 @@ public class TestFilter { // Test getting a single row, single key using Row, Qualifier, and Value // regular expression and substring filters // Use must pass all - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2"))); filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2"))); filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One"))); @@ -1520,7 +1520,7 @@ public class TestFilter { Scan s1 = new Scan(); s1.setFilter(filterList); InternalScanner scanner = testRegion.getScanner(s1); - List results = new ArrayList(); + List results = new ArrayList<>(); int resultCount = 0; while (scanner.next(results)) { resultCount++; @@ -1553,7 +1553,7 @@ public class TestFilter { 
// Now let's grab rows that have Q_ONE[0](VALUES[0]) and Q_ONE[2](VALUES[1]) // Since group two rows don't have these qualifiers, they will pass // so limiting scan to group one - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOp.EQUAL, VALUES[0])); filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2], @@ -1573,7 +1573,7 @@ public class TestFilter { // In order to get expected behavior without limiting to group one // need to wrap SCVFs in SkipFilters - filters = new ArrayList(); + filters = new ArrayList<>(); filters.add(new SkipFilter(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0], CompareOp.EQUAL, VALUES[0]))); filters.add(new SkipFilter(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2], @@ -1661,7 +1661,7 @@ public class TestFilter { private void verifyScan(Scan s, long expectedRows, long expectedKeys) throws IOException { InternalScanner scanner = this.region.getScanner(s); - List results = new ArrayList(); + List results = new ArrayList<>(); int i = 0; for (boolean done = true; done; i++) { done = scanner.next(results); @@ -1683,7 +1683,7 @@ public class TestFilter { long expectedKeys) throws IOException { InternalScanner scanner = this.region.getScanner(s); - List results = new ArrayList(); + List results = new ArrayList<>(); int i = 0; for (boolean done = true; done; i++) { done = scanner.next(results); @@ -1704,7 +1704,7 @@ public class TestFilter { private void verifyScanFull(Scan s, KeyValue [] kvs) throws IOException { InternalScanner scanner = this.region.getScanner(s); - List results = new ArrayList(); + List results = new ArrayList<>(); int row = 0; int idx = 0; for (boolean done = true; done; row++) { @@ -1735,7 +1735,7 @@ public class TestFilter { private void verifyScanFullNoValues(Scan s, KeyValue [] kvs, boolean useLen) throws IOException { InternalScanner scanner = this.region.getScanner(s); - List results = new ArrayList(); + List results = new ArrayList<>(); int row = 0; int idx = 0; for (boolean more = true; more; row++) { @@ -2073,7 +2073,7 @@ public class TestFilter { Scan s1 = new Scan(); s1.setFilter(rowFilter); InternalScanner scanner = testRegion.getScanner(s1); - List results = new ArrayList(); + List results = new ArrayList<>(); int i = 5; for (boolean done = true; done; i++) { done = scanner.next(results); @@ -2092,7 +2092,7 @@ public class TestFilter { s1 = new Scan(); s1.setFilter(subFilterList); scanner = testRegion.getScanner(s1); - results = new ArrayList(); + results = new ArrayList<>(); for (i=0; i<=4; i+=2) { scanner.next(results); assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i))); @@ -2108,7 +2108,7 @@ public class TestFilter { s1 = new Scan(); s1.setFilter(filterList); scanner = testRegion.getScanner(s1); - results = new ArrayList(); + results = new ArrayList<>(); for (i=0; i<=4; i+=2) { scanner.next(results); assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i))); @@ -2129,7 +2129,7 @@ public class TestFilter { s1 = new Scan(); s1.setFilter(filterList); scanner = testRegion.getScanner(s1); - results = new ArrayList(); + results = new ArrayList<>(); for (i=0; i<=4; i+=2) { scanner.next(results); assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index 
f80317b565a..ad71fcc8502 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -125,7 +125,7 @@ public class TestFilterList { } private Filter getFilterMPONE() { - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new PageFilter(MAX_PAGES)); filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy")))); Filter filterMPONE = @@ -191,7 +191,7 @@ public class TestFilterList { } private Filter getMPALLFilter() { - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new PageFilter(MAX_PAGES)); filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy")))); Filter filterMPALL = @@ -239,7 +239,7 @@ public class TestFilterList { } public Filter getOrderingFilter() { - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new PrefixFilter(Bytes.toBytes("yyy"))); filters.add(new PageFilter(MAX_PAGES)); Filter filterMPONE = @@ -370,7 +370,7 @@ public class TestFilterList { */ @Test public void testSerialization() throws Exception { - List filters = new ArrayList(); + List filters = new ArrayList<>(); filters.add(new PageFilter(MAX_PAGES)); filters.add(new WhileMatchFilter(new PrefixFilter(Bytes.toBytes("yyy")))); Filter filterMPALL = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java index bbde09d9c85..39abc95f060 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java @@ -101,13 +101,13 @@ public class TestFilterListOrOperatorWithBlkCnt { scan.setMaxVersions(); long blocksStart = getBlkAccessCount(); - List ranges1 = new ArrayList(); + List ranges1 = new ArrayList<>(); ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(15), false)); ranges1.add(new RowRange(Bytes.toBytes(9980), true, Bytes.toBytes(9985), false)); MultiRowRangeFilter filter1 = new MultiRowRangeFilter(ranges1); - List ranges2 = new ArrayList(); + List ranges2 = new ArrayList<>(); ranges2.add(new RowRange(Bytes.toBytes(15), true, Bytes.toBytes(20), false)); ranges2.add(new RowRange(Bytes.toBytes(9985), true, Bytes.toBytes(9990), false)); @@ -156,7 +156,7 @@ public class TestFilterListOrOperatorWithBlkCnt { scan.setStopRow(stopRow); } ResultScanner scanner = ht.getScanner(scan); - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); Result r; while ((r = scanner.next()) != null) { for (Cell kv : r.listCells()) { @@ -168,7 +168,7 @@ public class TestFilterListOrOperatorWithBlkCnt { private int getResultsSize(Table ht, Scan scan) throws IOException { ResultScanner scanner = ht.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); Result r; while ((r = scanner.next()) != null) { for (Cell kv : r.listCells()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java index 7c9651d9846..37e0d2d8aac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java @@ -105,12 +105,12 @@ 
public class TestFilterSerialization { @Test public void testFilterList() throws Exception { // empty filter list - FilterList filterList = new FilterList(new LinkedList()); + FilterList filterList = new FilterList(new LinkedList<>()); assertTrue(filterList.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(filterList)))); // non-empty filter list - LinkedList list = new LinkedList(); + LinkedList list = new LinkedList<>(); list.add(new ColumnCountGetFilter(1)); list.add(new RowFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("testFilterList"))); @@ -131,7 +131,7 @@ public class TestFilterSerialization { @Test public void testFirstKeyValueMatchingQualifiersFilter() throws Exception { // empty qualifiers set - TreeSet set = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet set = new TreeSet<>(Bytes.BYTES_COMPARATOR); FirstKeyValueMatchingQualifiersFilter firstKeyValueMatchingQualifiersFilter = new FirstKeyValueMatchingQualifiersFilter(set); assertTrue(firstKeyValueMatchingQualifiersFilter.areSerializedFieldsEqual( @@ -155,9 +155,9 @@ public class TestFilterSerialization { @Test public void testFuzzyRowFilter() throws Exception { - LinkedList> fuzzyList = new LinkedList>(); - fuzzyList.add(new Pair(Bytes.toBytes("999"),new byte[] {0, 0, 1})); - fuzzyList.add(new Pair(Bytes.toBytes("abcd"),new byte[] {1, 0, 1, 1})); + LinkedList> fuzzyList = new LinkedList<>(); + fuzzyList.add(new Pair<>(Bytes.toBytes("999"),new byte[] {0, 0, 1})); + fuzzyList.add(new Pair<>(Bytes.toBytes("abcd"),new byte[] {1, 0, 1, 1})); FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(fuzzyList); assertTrue(fuzzyRowFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(fuzzyRowFilter)))); @@ -294,12 +294,12 @@ public class TestFilterSerialization { @Test public void testTimestampsFilter() throws Exception { // Empty timestamp list - TimestampsFilter timestampsFilter = new TimestampsFilter(new LinkedList()); + TimestampsFilter timestampsFilter = new TimestampsFilter(new LinkedList<>()); assertTrue(timestampsFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); // Non-empty timestamp list - LinkedList list = new LinkedList(); + LinkedList list = new LinkedList<>(); list.add(new Long(System.currentTimeMillis())); list.add(new Long(System.currentTimeMillis())); timestampsFilter = new TimestampsFilter(list); @@ -326,7 +326,7 @@ public class TestFilterSerialization { @Test public void testMultiRowRangeFilter() throws Exception { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java index 0d2940cb727..4b2842cea92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java @@ -96,7 +96,7 @@ public class TestFilterWithScanLimits extends FilterTestingCluster { try { createTable(tableName, columnFamily); Table table = openTable(tableName); - List puts = new ArrayList(); + List puts = new ArrayList<>(); // row1 => , , , , // diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java index 59873bef58c..c4c3e36dc77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java @@ -75,7 +75,7 @@ public class TestFilterWrapper { int row_number = 0; try { Scan scan = new Scan(); - List fs = new ArrayList(); + List fs = new ArrayList<>(); DependentColumnFilter f1 = new DependentColumnFilter(Bytes.toBytes("f1"), Bytes.toBytes("c5"), true, CompareFilter.CompareOp.EQUAL, @@ -115,7 +115,7 @@ public class TestFilterWrapper { try { Table table = connection.getTable(name); assertTrue("Fail to create the table", admin.tableExists(name)); - List puts = new ArrayList(); + List puts = new ArrayList<>(); // row1 => , , , // , diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java index 0d045f71e9f..dbda361ccf0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java @@ -45,7 +45,7 @@ public class TestFirstKeyValueMatchingQualifiersFilter extends TestCase { * @throws Exception */ public void testFirstKeyMatchingQualifierFilter() throws Exception { - Set quals = new TreeSet(Bytes.BYTES_COMPARATOR); + Set quals = new TreeSet<>(Bytes.BYTES_COMPARATOR); quals.add(COLUMN_QUALIFIER_1); quals.add(COLUMN_QUALIFIER_2); Filter filter = new FirstKeyValueMatchingQualifiersFilter(quals); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java index 53d87d37a1a..989a93bc515 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java @@ -149,7 +149,7 @@ public class TestFuzzyRowAndColumnRangeFilter { byte[] mask = new byte[] {0 , 0, 1, 1, 1, 1, 0, 0, 0, 0}; - Pair pair = new Pair(fuzzyKey, mask); + Pair pair = new Pair<>(fuzzyKey, mask); FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair)); ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true , Bytes.toBytes(4), true); @@ -167,7 +167,7 @@ public class TestFuzzyRowAndColumnRangeFilter { scan.setFilter(filterList); ResultScanner scanner = hTable.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); Result result; long timeBeforeScan = System.currentTimeMillis(); while ((result = scanner.next()) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java index 21aac70a001..3c11efeb766 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java @@ -137,10 +137,10 @@ public class TestFuzzyRowFilterEndToEnd { TEST_UTIL.flush(); - List> data = new ArrayList>(); + List> data = 
new ArrayList<>(); byte[] fuzzyKey = Bytes.toBytesBinary("\\x9B\\x00\\x044e"); byte[] mask = new byte[] { 0, 0, 0, 0, 0 }; - data.add(new Pair(fuzzyKey, mask)); + data.add(new Pair<>(fuzzyKey, mask)); FuzzyRowFilter filter = new FuzzyRowFilter(data); Scan scan = new Scan(); @@ -187,10 +187,10 @@ public class TestFuzzyRowFilterEndToEnd { TEST_UTIL.flush(); - List> data = new ArrayList>(); + List> data = new ArrayList<>(); byte[] fuzzyKey = Bytes.toBytesBinary("\\x00\\x00\\x044"); byte[] mask = new byte[] { 1,0,0,0}; - data.add(new Pair(fuzzyKey, mask)); + data.add(new Pair<>(fuzzyKey, mask)); FuzzyRowFilter filter = new FuzzyRowFilter(data); Scan scan = new Scan(); @@ -254,7 +254,7 @@ public class TestFuzzyRowFilterEndToEnd { byte[] mask = new byte[] { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 }; - List> list = new ArrayList>(); + List> list = new ArrayList<>(); for (int i = 0; i < totalFuzzyKeys; i++) { byte[] fuzzyKey = new byte[10]; ByteBuffer buf = ByteBuffer.wrap(fuzzyKey); @@ -265,7 +265,7 @@ public class TestFuzzyRowFilterEndToEnd { } buf.putInt(i); - Pair pair = new Pair(fuzzyKey, mask); + Pair pair = new Pair<>(fuzzyKey, mask); list.add(pair); } @@ -286,7 +286,7 @@ public class TestFuzzyRowFilterEndToEnd { byte[] mask = new byte[] { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 }; - List> list = new ArrayList>(); + List> list = new ArrayList<>(); for (int i = 0; i < totalFuzzyKeys; i++) { byte[] fuzzyKey = new byte[10]; @@ -298,7 +298,7 @@ public class TestFuzzyRowFilterEndToEnd { } buf.putInt(i * 2); - Pair pair = new Pair(fuzzyKey, mask); + Pair pair = new Pair<>(fuzzyKey, mask); list.add(pair); } @@ -325,7 +325,7 @@ public class TestFuzzyRowFilterEndToEnd { HRegion first = regions.get(0); first.getScanner(scan); RegionScanner scanner = first.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); // Result result; long timeBeforeScan = System.currentTimeMillis(); int found = 0; @@ -408,8 +408,8 @@ public class TestFuzzyRowFilterEndToEnd { byte[] mask2 = new byte[] { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }; - Pair pair1 = new Pair(fuzzyKey1, mask1); - Pair pair2 = new Pair(fuzzyKey2, mask2); + Pair pair1 = new Pair<>(fuzzyKey1, mask1); + Pair pair2 = new Pair<>(fuzzyKey2, mask2); FuzzyRowFilter fuzzyRowFilter1 = new FuzzyRowFilter(Lists.newArrayList(pair1)); FuzzyRowFilter fuzzyRowFilter2 = new FuzzyRowFilter(Lists.newArrayList(pair2)); @@ -426,7 +426,7 @@ public class TestFuzzyRowFilterEndToEnd { scan.setFilter(filterList); ResultScanner scanner = hTable.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); Result result; long timeBeforeScan = System.currentTimeMillis(); while ((result = scanner.next()) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java index 8291e5257c4..159769ee985 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java @@ -84,8 +84,8 @@ public class TestInvocationRecordFilter { @Test public void testFilterInvocation() throws Exception { - List selectQualifiers = new ArrayList(); - List expectedQualifiers = new ArrayList(); + List selectQualifiers = new ArrayList<>(); + List expectedQualifiers = new ArrayList<>(); selectQualifiers.add(-1); verifyInvocationResults(selectQualifiers.toArray(new Integer[selectQualifiers.size()]), @@ -127,7 
+127,7 @@ public class TestInvocationRecordFilter { get.setFilter(new InvocationRecordFilter()); - List expectedValues = new ArrayList(); + List expectedValues = new ArrayList<>(); for (int i = 0; i < expectedQualifiers.length; i++) { expectedValues.add(new KeyValue(ROW_BYTES, FAMILY_NAME_BYTES, Bytes .toBytes(QUALIFIER_PREFIX + expectedQualifiers[i]), @@ -136,8 +136,8 @@ public class TestInvocationRecordFilter { } Scan scan = new Scan(get); - List actualValues = new ArrayList(); - List temp = new ArrayList(); + List actualValues = new ArrayList<>(); + List temp = new ArrayList<>(); InternalScanner scanner = this.region.getScanner(scan); while (scanner.next(temp)) { actualValues.addAll(temp); @@ -161,7 +161,7 @@ public class TestInvocationRecordFilter { */ private static class InvocationRecordFilter extends FilterBase { - private List visitedKeyValues = new ArrayList(); + private List visitedKeyValues = new ArrayList<>(); public void reset() { visitedKeyValues.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java index 271edaf567b..0b1c368ad5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java @@ -120,48 +120,48 @@ public class TestMultiRowRangeFilter { @Test public void testMergeAndSortWithEmptyStartRow() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(15), true, Bytes.toBytes(40), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(40), false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testMergeAndSortWithEmptyStopRow() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(15), true, Bytes.toBytes(""), false)); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(70), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testMergeAndSortWithEmptyStartRowAndStopRow() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(""), false)); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(70), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(""), false)); assertRangesEqual(expectedRanges, actualRanges); } @Test(expected=IllegalArgumentException.class) public void testMultiRowRangeWithoutRange() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); new 
MultiRowRangeFilter(ranges); } @Test(expected=IllegalArgumentException.class) public void testMultiRowRangeWithInvalidRange() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); // the start row larger than the stop row ranges.add(new RowRange(Bytes.toBytes(80), true, Bytes.toBytes(20), false)); @@ -171,12 +171,12 @@ public class TestMultiRowRangeFilter { @Test public void testMergeAndSortWithoutOverlap() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); expectedRanges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); expectedRanges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); @@ -185,7 +185,7 @@ public class TestMultiRowRangeFilter { @Test public void testMergeAndSortWithOverlap() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(15), true, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(30), false)); @@ -194,7 +194,7 @@ public class TestMultiRowRangeFilter { ranges.add(new RowRange(Bytes.toBytes(90), true, Bytes.toBytes(100), false)); ranges.add(new RowRange(Bytes.toBytes(95), true, Bytes.toBytes(100), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(70), false)); expectedRanges.add(new RowRange(Bytes.toBytes(90), true, Bytes.toBytes(100), false)); assertRangesEqual(expectedRanges, actualRanges); @@ -202,22 +202,22 @@ public class TestMultiRowRangeFilter { @Test public void testMergeAndSortWithStartRowInclusive() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(""), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false)); assertRangesEqual(expectedRanges, actualRanges); } @Test public void testMergeAndSortWithRowExclusive() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(""), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); expectedRanges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(""), false)); assertRangesEqual(expectedRanges, 
actualRanges); @@ -225,11 +225,11 @@ public class TestMultiRowRangeFilter { @Test public void testMergeAndSortWithRowInclusive() throws IOException { - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), true)); ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(""), false)); List actualRanges = MultiRowRangeFilter.sortAndMerge(ranges); - List expectedRanges = new ArrayList(); + List expectedRanges = new ArrayList<>(); expectedRanges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false)); assertRangesEqual(expectedRanges, actualRanges); } @@ -255,7 +255,7 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(15), true, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false)); @@ -283,7 +283,7 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); @@ -309,7 +309,7 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false)); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); @@ -331,7 +331,7 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false)); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); @@ -353,7 +353,7 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false)); @@ -384,7 +384,7 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges = new ArrayList(); + List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false)); ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false)); @@ -410,14 +410,14 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges1 = new ArrayList(); + List ranges1 = new ArrayList<>(); ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges1.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); ranges1.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); MultiRowRangeFilter filter1 = new MultiRowRangeFilter(ranges1); - List ranges2 = new ArrayList(); + List ranges2 = new ArrayList<>(); ranges2.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(40), false)); 
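A note on scope, sketched below with JDK types only (the class name is made up for illustration, and the Java 9 remark is general Java behavior rather than anything stated by this patch): the diamond applies only to ordinary instance creation; anonymous inner classes could not use it before Java 9, so such sites keep their explicit type arguments.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class DiamondLimitsSketch {
  public static void main(String[] args) {
    // Ordinary allocations can use the diamond, here under Java 8 target typing.
    List<Integer> ranges = new ArrayList<>();
    ranges.add(10);
    ranges.add(30);

    // Anonymous classes must keep explicit type arguments on Java 7/8;
    // new Comparator<>() { ... } only became legal in Java 9.
    Comparator<Integer> reverse = new Comparator<Integer>() {
      @Override
      public int compare(Integer a, Integer b) {
        return b.compareTo(a);
      }
    };
    ranges.sort(reverse);
    System.out.println(ranges);
  }
}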
ranges2.add(new RowRange(Bytes.toBytes(80), true, Bytes.toBytes(90), false)); @@ -445,14 +445,14 @@ public class TestMultiRowRangeFilter { Scan scan = new Scan(); scan.setMaxVersions(); - List ranges1 = new ArrayList(); + List ranges1 = new ArrayList<>(); ranges1.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); ranges1.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false)); MultiRowRangeFilter filter1 = new MultiRowRangeFilter(ranges1); - List ranges2 = new ArrayList(); + List ranges2 = new ArrayList<>(); ranges2.add(new RowRange(Bytes.toBytes(20), true, Bytes.toBytes(40), false)); ranges2.add(new RowRange(Bytes.toBytes(80), true, Bytes.toBytes(90), false)); @@ -523,7 +523,7 @@ public class TestMultiRowRangeFilter { scan.setStopRow(stopRow); } ResultScanner scanner = ht.getScanner(scan); - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); Result r; while ((r = scanner.next()) != null) { for (Cell kv : r.listCells()) { @@ -536,7 +536,7 @@ public class TestMultiRowRangeFilter { private int getResultsSize(Table ht, Scan scan) throws IOException { ResultScanner scanner = ht.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); Result r; while ((r = scanner.next()) != null) { for (Cell kv : r.listCells()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index d8df298c8a1..d30cb37d097 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -73,14 +73,13 @@ public class TestMultipleColumnPrefixFilter { List columns = generateRandomWords(10000, "column"); long maxTimestamp = 2; - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); - Map> prefixMap = new HashMap>(); + Map> prefixMap = new HashMap<>(); - prefixMap.put("p", new ArrayList()); - prefixMap.put("q", new ArrayList()); - prefixMap.put("s", new ArrayList()); + prefixMap.put("p", new ArrayList<>()); + prefixMap.put("q", new ArrayList<>()); + prefixMap.put("s", new ArrayList<>()); String valueString = "ValueString"; @@ -112,7 +111,7 @@ public class TestMultipleColumnPrefixFilter { filter = new MultipleColumnPrefixFilter(filter_prefix); scan.setFilter(filter); - List results = new ArrayList(); + List results = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); while (scanner.next(results)) ; @@ -140,14 +139,13 @@ public class TestMultipleColumnPrefixFilter { List columns = generateRandomWords(10000, "column"); long maxTimestamp = 3; - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); - Map> prefixMap = new HashMap>(); + Map> prefixMap = new HashMap<>(); - prefixMap.put("p", new ArrayList()); - prefixMap.put("q", new ArrayList()); - prefixMap.put("s", new ArrayList()); + prefixMap.put("p", new ArrayList<>()); + prefixMap.put("q", new ArrayList<>()); + prefixMap.put("s", new ArrayList<>()); String valueString = "ValueString"; @@ -185,7 +183,7 @@ public class TestMultipleColumnPrefixFilter { filter = new MultipleColumnPrefixFilter(filter_prefix); scan.setFilter(filter); - List results = new ArrayList(); + List results = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); while (scanner.next(results)) ; @@ -230,7 
+228,7 @@ public class TestMultipleColumnPrefixFilter { multiplePrefixFilter = new MultipleColumnPrefixFilter(filter_prefix); scan1.setFilter(multiplePrefixFilter); - List results1 = new ArrayList(); + List results1 = new ArrayList<>(); InternalScanner scanner1 = region.getScanner(scan1); while (scanner1.next(results1)) ; @@ -241,7 +239,7 @@ public class TestMultipleColumnPrefixFilter { singlePrefixFilter = new ColumnPrefixFilter(Bytes.toBytes("p")); scan2.setFilter(singlePrefixFilter); - List results2 = new ArrayList(); + List results2 = new ArrayList<>(); InternalScanner scanner2 = region.getScanner(scan1); while (scanner2.next(results2)) ; @@ -252,7 +250,7 @@ public class TestMultipleColumnPrefixFilter { } List generateRandomWords(int numberOfWords, String suffix) { - Set wordSet = new HashSet(); + Set wordSet = new HashSet<>(); for (int i = 0; i < numberOfWords; i++) { int lengthOfWords = (int) (Math.random()*2) + 1; char[] wordChar = new char[lengthOfWords]; @@ -267,7 +265,7 @@ public class TestMultipleColumnPrefixFilter { } wordSet.add(word); } - List wordList = new ArrayList(wordSet); + List wordList = new ArrayList<>(wordSet); return wordList; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java index e23a394e6c8..0ef3ea70336 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java @@ -58,7 +58,7 @@ public class TestSingleColumnValueExcludeFilter { CompareOp.EQUAL, VAL_1); // A 'match' situation - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1); kvs.add (new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER_2, VAL_1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java index 0165b3d23e8..acfe9293d20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java @@ -45,7 +45,7 @@ import org.junit.experimental.categories.Category; @Category({MiscTests.class, SmallTests.class}) public class TestGlobalFilter extends HttpServerFunctionalTest { private static final Log LOG = LogFactory.getLog(HttpServer.class); - static final Set RECORDS = new TreeSet(); + static final Set RECORDS = new TreeSet<>(); /** A very simple filter that records accessed uri's */ static public class RecordingFilter implements Filter { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java index 3b9e852cfad..31b5b8d3268 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java @@ -84,7 +84,7 @@ public class TestHttpServer extends HttpServerFunctionalTest { ) throws ServletException, IOException { PrintWriter out = response.getWriter(); Map params = request.getParameterMap(); - SortedSet keys = new TreeSet(params.keySet()); + SortedSet keys = new TreeSet<>(params.keySet()); for(String key: keys) { out.print(key); out.print(':'); @@ -109,7 +109,7 @@ public class 
TestHttpServer extends HttpServerFunctionalTest { HttpServletResponse response ) throws ServletException, IOException { PrintWriter out = response.getWriter(); - SortedSet sortedKeys = new TreeSet(); + SortedSet sortedKeys = new TreeSet<>(); Enumeration keys = request.getParameterNames(); while(keys.hasMoreElements()) { sortedKeys.add(keys.nextElement()); @@ -335,7 +335,7 @@ public class TestHttpServer extends HttpServerFunctionalTest { * Custom user->group mapping service. */ public static class MyGroupsProvider extends ShellBasedUnixGroupsMapping { - static Map> mapping = new HashMap>(); + static Map> mapping = new HashMap<>(); static void clearMapping() { mapping.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java index 33618adc12d..3c2de5391af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java @@ -45,7 +45,7 @@ import org.junit.experimental.categories.Category; @Category({MiscTests.class, SmallTests.class}) public class TestPathFilter extends HttpServerFunctionalTest { private static final Log LOG = LogFactory.getLog(HttpServer.class); - static final Set RECORDS = new TreeSet(); + static final Set RECORDS = new TreeSet<>(); /** A very simple filter that records accessed uri's */ static public class RecordingFilter implements Filter { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java index da9519e92e8..bf0e609192e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java @@ -55,7 +55,7 @@ public class JerseyResource { ) throws IOException { LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op); - final Map m = new TreeMap(); + final Map m = new TreeMap<>(); m.put(PATH, path); m.put(OP, op); final String js = JSON.toString(m); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index 86687384d04..234bd7a18ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -206,7 +206,7 @@ public class KeyStoreTestUtil { File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml"); File sslServerConfFile = new File(sslConfDir + "/ssl-server.xml"); - Map certs = new HashMap(); + Map certs = new HashMap<>(); if (useClientCert) { KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java index 0da685fbd38..8ee7d3d88da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java @@ -126,7 +126,7 @@ public class TestFileLink { writeSomeData(fs, originalPath, 256 << 20, (byte)2); - List files = new ArrayList(); + List files = new ArrayList<>(); files.add(originalPath); files.add(archivedPath); @@ -194,7 +194,7 @@ public class TestFileLink { assertEquals("hdfs", fs.getUri().getScheme()); 
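The nearby TreeSet/TreeMap hunks show that constructor arguments (a comparator, or a source collection) do not interfere with diamond inference. A small standalone sketch with JDK types, names invented for illustration:

import java.util.Arrays;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class DiamondCtorArgsSketch {
  public static void main(String[] args) {
    List<String> params = Arrays.asList("b", "A", "c");

    // The element type is still inferred from the declared type on the left,
    // even though the constructor takes a Comparator argument.
    SortedSet<String> caseInsensitive = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
    caseInsensitive.addAll(params);

    // The same holds for copy constructors that take an existing collection.
    SortedSet<String> copy = new TreeSet<>(params);

    System.out.println(caseInsensitive + " " + copy);
  }
}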
try { - List files = new ArrayList(); + List files = new ArrayList<>(); for (int i = 0; i < 3; i++) { Path path = new Path(String.format("test-data-%d", i)); writeSomeData(fs, path, 1 << 20, (byte)i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 0e5f08e086a..6a0921f2af8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -241,7 +241,7 @@ public class TestHalfStoreFileReader { } List genSomeKeys() { - List ret = new ArrayList(SIZE); + List ret = new ArrayList<>(SIZE); for (int i = 0; i < SIZE; i++) { KeyValue kv = new KeyValue( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java index d31af312ba1..7e14228fa60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java @@ -59,7 +59,7 @@ public class TestBufferedDataBlockEncoder { @Test public void testEnsureSpaceForKey() { BufferedDataBlockEncoder.SeekerState state = new BufferedDataBlockEncoder.SeekerState( - new ObjectIntPair(), false); + new ObjectIntPair<>(), false); for (int i = 1; i <= 65536; ++i) { state.keyLength = i; state.ensureSpaceForKey(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index a2cd50cd09a..97f74afcc4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -77,8 +77,7 @@ public class TestChangingEncoding { createEncodingsToIterate(); private static final List createEncodingsToIterate() { - List encodings = new ArrayList( - Arrays.asList(DataBlockEncoding.values())); + List encodings = new ArrayList<>(Arrays.asList(DataBlockEncoding.values())); encodings.add(DataBlockEncoding.NONE); return Collections.unmodifiableList(encodings); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index 66fee6afd03..dc15bf5dbaf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -124,7 +124,7 @@ public class TestDataBlockEncoders { */ @Test public void testEmptyKeyValues() throws IOException { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); byte[] row = new byte[0]; byte[] family = new byte[0]; byte[] qualifier = new byte[0]; @@ -151,7 +151,7 @@ public class TestDataBlockEncoders { */ @Test public void testNegativeTimestamps() throws IOException { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); byte[] row = new byte[0]; byte[] family = new byte[0]; byte[] qualifier = new byte[0]; @@ -190,8 +190,7 @@ public class TestDataBlockEncoders { List sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags); // create all seekers - List 
encodedSeekers = - new ArrayList(); + List encodedSeekers = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { LOG.info("Encoding: " + encoding); // Off heap block data support not added for PREFIX_TREE DBE yet. @@ -403,7 +402,7 @@ public class TestDataBlockEncoders { @Test public void testZeroByte() throws IOException { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); byte[] row = Bytes.toBytes("abcd"); byte[] family = new byte[] { 'f' }; byte[] qualifier0 = new byte[] { 'b' }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index 0869df6bccd..1b5c630f310 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -81,7 +81,7 @@ public class TestEncodedSeekers { @Parameters public static Collection parameters() { - List paramList = new ArrayList(); + List paramList = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { for (boolean includeTags : new boolean[] { false, true }) { for (boolean compressTags : new boolean[] { false, true }) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java index e31a73bb4ff..6b1389924b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java @@ -114,7 +114,7 @@ public class TestPrefixTree { scan.setStopRow(Bytes.toBytes("a-b-A-1:")); RegionScanner scanner = region.getScanner(scan); - List cells = new ArrayList(); + List cells = new ArrayList<>(); for (int i = 0; i < 3; i++) { assertEquals(i < 2, scanner.next(cells)); CellScanner cellScanner = Result.create(cells).cellScanner(); @@ -184,7 +184,7 @@ public class TestPrefixTree { region.flush(true); Scan scan = new Scan(Bytes.toBytes("obj29995")); RegionScanner scanner = region.getScanner(scan); - List cells = new ArrayList(); + List cells = new ArrayList<>(); assertFalse(scanner.next(cells)); assertArrayEquals(Bytes.toBytes("obj3"), Result.create(cells).getRow()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java index fd9b90b80ba..decd39d5654 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTreeEncoding.java @@ -73,14 +73,13 @@ public class TestPrefixTreeEncoding { private static final int NUM_COLS_PER_ROW = 20; private int numBatchesWritten = 0; - private ConcurrentSkipListSet kvset = new ConcurrentSkipListSet( - CellComparator.COMPARATOR); + private ConcurrentSkipListSet kvset = new ConcurrentSkipListSet<>(CellComparator.COMPARATOR); private static boolean formatRowNum = false; @Parameters public static Collection parameters() { - List paramList = new ArrayList(); + List paramList = new ArrayList<>(); { paramList.add(new Object[] { false }); paramList.add(new Object[] { true }); @@ -228,7 +227,7 @@ public class TestPrefixTreeEncoding { private void verifySeeking(EncodedSeeker encodeSeeker, ByteBuffer encodedData, int 
batchId) { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) { kvList.clear(); encodeSeeker.setCurrentBuffer(new SingleByteBuff(encodedData)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java index 2826694bb66..3bf189df405 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java @@ -92,7 +92,7 @@ public class TestSeekBeforeWithReverseScan { scan.setFilter(new FirstKeyOnlyFilter()); scan.addFamily(cfName); RegionScanner scanner = region.getScanner(scan); - List res = new ArrayList(); + List res = new ArrayList<>(); int count = 1; while (scanner.next(res)) { count++; @@ -130,7 +130,7 @@ public class TestSeekBeforeWithReverseScan { scan.setFilter(new FirstKeyOnlyFilter()); scan.addFamily(cfName); RegionScanner scanner = region.getScanner(scan); - List res = new ArrayList(); + List res = new ArrayList<>(); int count = 1; while (scanner.next(res)) { count++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index 46cc9f983c1..cc70dc1ba81 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -63,7 +63,7 @@ public class TestSeekToBlockWithEncoders { */ @Test public void testSeekToBlockWithNonMatchingSeekKey() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -89,7 +89,7 @@ public class TestSeekToBlockWithEncoders { */ @Test public void testSeekingToBlockWithBiggerNonLength1() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -115,7 +115,7 @@ public class TestSeekToBlockWithEncoders { */ @Test public void testSeekingToBlockToANotAvailableKey() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -141,7 +141,7 @@ public class TestSeekToBlockWithEncoders { */ @Test public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -160,7 +160,7 @@ public class TestSeekToBlockWithEncoders { @Test public void testSeekToBlockWithDiffQualifer() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -180,7 +180,7 @@ public class TestSeekToBlockWithEncoders { @Test public void 
testSeekToBlockWithDiffQualiferOnSameRow() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -203,7 +203,7 @@ public class TestSeekToBlockWithEncoders { @Test public void testSeekToBlockWithDiffQualiferOnSameRow1() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -226,7 +226,7 @@ public class TestSeekToBlockWithEncoders { @Test public void testSeekToBlockWithDiffQualiferOnSameRowButDescendingInSize() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -249,7 +249,7 @@ public class TestSeekToBlockWithEncoders { @Test public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException { - List sampleKv = new ArrayList(); + List sampleKv = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val")); sampleKv.add(kv1); @@ -270,7 +270,7 @@ public class TestSeekToBlockWithEncoders { private void seekToTheKey(KeyValue expected, List kvs, Cell toSeek) throws IOException { // create all seekers - List encodedSeekers = new ArrayList(); + List encodedSeekers = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { if (encoding.getEncoder() == null || encoding == DataBlockEncoding.PREFIX_TREE) { continue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java index bd3f4c7fea6..8d947665c52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java @@ -77,7 +77,7 @@ public class CacheTestUtils { conf); final AtomicInteger totalQueries = new AtomicInteger(); - final ConcurrentLinkedQueue blocksToTest = new ConcurrentLinkedQueue(); + final ConcurrentLinkedQueue blocksToTest = new ConcurrentLinkedQueue<>(); final AtomicInteger hits = new AtomicInteger(); final AtomicInteger miss = new AtomicInteger(); @@ -344,7 +344,7 @@ public class CacheTestUtils { public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) { HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks]; Random rand = new Random(); - HashSet usedStrings = new HashSet(); + HashSet usedStrings = new HashSet<>(); for (int i = 0; i < numBlocks; i++) { ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize); rand.nextBytes(cachedBuffer.array()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java index 49f57de6e29..bbc612f2427 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomDistribution.java @@ -128,8 +128,8 @@ public class RandomDistribution { throw new IllegalArgumentException("Invalid arguments"); } random = r; - k = new ArrayList(); - v = new ArrayList(); + k = new ArrayList<>(); + v = new 
ArrayList<>(); double sum = 0; int last = -1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 8f9c4f7dee1..3315b6f75ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -154,7 +154,7 @@ public class TestCacheOnWrite { private static List getBlockCaches() throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); - List blockcaches = new ArrayList(); + List blockcaches = new ArrayList<>(); // default blockcaches.add(new CacheConfig(conf).getBlockCache()); @@ -176,7 +176,7 @@ public class TestCacheOnWrite { @Parameters public static Collection getParameters() throws IOException { - List params = new ArrayList(); + List params = new ArrayList<>(); for (BlockCache blockCache : getBlockCaches()) { for (CacheOnWriteType cowType : CacheOnWriteType.values()) { for (Compression.Algorithm compress : HBaseTestingUtility.COMPRESSION_ALGORITHMS) { @@ -261,12 +261,11 @@ public class TestCacheOnWrite { assertTrue(testDescription, scanner.seekTo()); long offset = 0; - EnumMap blockCountByType = - new EnumMap(BlockType.class); + EnumMap blockCountByType = new EnumMap<>(BlockType.class); DataBlockEncoding encodingInCache = NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding(); - List cachedBlocksOffset = new ArrayList(); - Map cachedBlocks = new HashMap(); + List cachedBlocksOffset = new ArrayList<>(); + Map cachedBlocks = new HashMap<>(); while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { // Flags: don't cache the block, use pread, this is not a compaction. // Also, pass null for expected block type to avoid checking it. 
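// Note (illustrative sketch, not part of the patch): the hunks above and below all apply the
// same Java 7 refactor, replacing the explicit type arguments on the right-hand side of a
// generic instantiation with the diamond operator so the compiler infers them from the
// declared type. A minimal, self-contained example of the pattern; the class and variable
// names here are hypothetical and do not come from the HBase sources.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class DiamondOperatorSketch {
  public static void main(String[] args) {
    // Before the refactor: type arguments are repeated on both sides.
    Map<String, List<Integer>> explicit = new HashMap<String, List<Integer>>();

    // After the refactor: the diamond operator lets the compiler infer the
    // arguments from the declared type, which is what each "+" line above does.
    Map<String, List<Integer>> inferred = new HashMap<>();

    // Constructor arguments such as an initial capacity or a Comparator are
    // unaffected by the change; only the type arguments are dropped.
    List<String> names = new ArrayList<>(16);
    Map<String, Integer> sorted = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);

    names.add("example");
    sorted.put("example", explicit.size() + inferred.size());
    System.out.println(names + " " + sorted);
  }
}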
@@ -383,7 +382,7 @@ public class TestCacheOnWrite { KeyValue kv; if(useTags) { Tag t = new ArrayBackedTag((byte) 1, "visibility"); - List tagList = new ArrayList(); + List tagList = new ArrayList<>(); tagList.add(t); Tag[] tags = new Tag[1]; tags[0] = t; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 95063ce7e5d..6145eca14a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -77,7 +77,7 @@ public class TestFixedFileTrailer { @Parameters public static Collection getParameters() { - List versionsToTest = new ArrayList(); + List versionsToTest = new ArrayList<>(); for (int v = HFile.MIN_FORMAT_VERSION; v <= HFile.MAX_FORMAT_VERSION; ++v) versionsToTest.add(new Integer[] { v } ); return versionsToTest; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index c75232a1eb6..1c87af4d8e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -123,7 +123,7 @@ public class TestHFileBlock { static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS, boolean useTag) throws IOException { - List keyValues = new ArrayList(); + List keyValues = new ArrayList<>(); Random randomizer = new Random(42l + seed); // just any fixed number // generate keyValues @@ -383,8 +383,8 @@ public class TestHFileBlock { .build(); HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder, meta); long totalSize = 0; - final List encodedSizes = new ArrayList(); - final List encodedBlocks = new ArrayList(); + final List encodedSizes = new ArrayList<>(); + final List encodedBlocks = new ArrayList<>(); for (int blockId = 0; blockId < numBlocks; ++blockId) { hbw.startWriting(BlockType.DATA); writeTestKeyValues(hbw, blockId, includesMemstoreTS, includesTag); @@ -532,11 +532,10 @@ public class TestHFileBlock { ", pread=" + pread + ", cacheOnWrite=" + cacheOnWrite); Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset"); - List expectedOffsets = new ArrayList(); - List expectedPrevOffsets = new ArrayList(); - List expectedTypes = new ArrayList(); - List expectedContents = cacheOnWrite - ? new ArrayList() : null; + List expectedOffsets = new ArrayList<>(); + List expectedPrevOffsets = new ArrayList<>(); + List expectedTypes = new ArrayList<>(); + List expectedContents = cacheOnWrite ? 
new ArrayList<>() : null; long totalSize = writeBlocks(rand, algo, path, expectedOffsets, expectedPrevOffsets, expectedTypes, expectedContents); @@ -718,8 +717,8 @@ public class TestHFileBlock { for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) { Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading"); Random rand = defaultRandom(); - List offsets = new ArrayList(); - List types = new ArrayList(); + List offsets = new ArrayList<>(); + List types = new ArrayList<>(); writeBlocks(rand, compressAlgo, path, offsets, null, types, null); FSDataInputStream is = fs.open(path); long fileSize = fs.getFileStatus(path).getLen(); @@ -732,8 +731,7 @@ public class TestHFileBlock { HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, fileSize, meta); Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS); - ExecutorCompletionService ecs = - new ExecutorCompletionService(exec); + ExecutorCompletionService ecs = new ExecutorCompletionService<>(exec); for (int i = 0; i < NUM_READER_THREADS; ++i) { ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr, @@ -768,7 +766,7 @@ public class TestHFileBlock { .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM) .build(); HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta); - Map prevOffsetByType = new HashMap(); + Map prevOffsetByType = new HashMap<>(); long totalSize = 0; for (int i = 0; i < NUM_TEST_BLOCKS; ++i) { long pos = os.getPos(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index ce6ec82ab94..28930dbf453 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -95,7 +95,7 @@ public class TestHFileBlockIndex { private long rootIndexOffset; private int numRootEntries; private int numLevels; - private static final List keys = new ArrayList(); + private static final List keys = new ArrayList<>(); private final Compression.Algorithm compr; private byte[] firstKeyInFile; private Configuration conf; @@ -604,7 +604,7 @@ public class TestHFileBlockIndex { blockCache.evictBlocksByHfileName(hfilePath.getName()); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize); - Set keyStrSet = new HashSet(); + Set keyStrSet = new HashSet<>(); byte[][] keys = new byte[NUM_KV][]; byte[][] values = new byte[NUM_KV][]; @@ -674,7 +674,7 @@ public class TestHFileBlockIndex { HFileBlock.BlockIterator iter = fsReader.blockRange(0, reader.getTrailer().getLoadOnOpenDataOffset()); HFileBlock block; - List blockKeys = new ArrayList(); + List blockKeys = new ArrayList<>(); while ((block = iter.nextBlock()) != null) { if (block.getBlockType() != BlockType.LEAF_INDEX) return; @@ -762,7 +762,7 @@ public class TestHFileBlockIndex { HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create(); - List keys = new ArrayList(); + List keys = new ArrayList<>(); // This should result in leaf-level indices and a root level index for (int i=0; i < 100; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java index 387514e980e..ac939d1f772 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java @@ -243,8 +243,7 @@ public class TestHFileDataBlockEncoder { */ @Parameters public static Collection getAllConfigurations() { - List configurations = - new ArrayList(); + List configurations = new ArrayList<>(); for (DataBlockEncoding diskAlgo : DataBlockEncoding.values()) { for (boolean includesMemstoreTS : new boolean[] { false, true }) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index af4f2b86ad3..f1528c2140b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -58,7 +58,7 @@ public class TestHFileInlineToRootChunkConversion { HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create(); - List keys = new ArrayList(); + List keys = new ArrayList<>(); StringBuilder sb = new StringBuilder(); for (int i = 0; i < 4; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index 983ec2f2ffa..fe6b549faf5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -128,7 +128,7 @@ public class TestHFileWriterV3 { .create(); Random rand = new Random(9713312); // Just a fixed seed. - List keyValues = new ArrayList(entryCount); + List keyValues = new ArrayList<>(entryCount); for (int i = 0; i < entryCount; ++i) { byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i); @@ -137,7 +137,7 @@ public class TestHFileWriterV3 { byte[] valueBytes = RandomKeyValueUtil.randomValue(rand); KeyValue keyValue = null; if (useTags) { - ArrayList tags = new ArrayList(); + ArrayList tags = new ArrayList<>(); for (int j = 0; j < 1 + rand.nextInt(4); j++) { byte[] tagBytes = new byte[16]; rand.nextBytes(tagBytes); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index cf3c6ed1ad7..9253ce16817 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -122,7 +122,7 @@ public class TestLazyDataBlockDecompression { reader.loadFileInfo(); long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset(); - List blocks = new ArrayList(4); + List blocks = new ArrayList<>(4); HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ true, /* pread */ false, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 90e398d9b8a..a9ecf7b0059 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -72,8 +72,8 @@ public class TestReseekTo { String valueString = "Value"; - 
List keyList = new ArrayList(); - List valueList = new ArrayList(); + List keyList = new ArrayList<>(); + List valueList = new ArrayList<>(); for (int key = 0; key < numberOfKeys; key++) { String value = valueString + key; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java index 94e7219e605..f1775d0e470 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java @@ -208,7 +208,7 @@ public class TestScannerFromBucketCache { Scan scan = new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(10); - actual = new ArrayList(); + actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -314,7 +314,7 @@ public class TestScannerFromBucketCache { } // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -326,7 +326,7 @@ public class TestScannerFromBucketCache { Scan scan = new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(MAX_VERSIONS); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java index 9c6bb3845ad..c834fca549a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java @@ -61,7 +61,7 @@ public class TestScannerSelectionUsingKeyRange { private static final int NUM_ROWS = 8; private static final int NUM_COLS_PER_ROW = 5; private static final int NUM_FILES = 2; - private static final Map TYPE_COUNT = new HashMap(3); + private static final Map TYPE_COUNT = new HashMap<>(3); static { TYPE_COUNT.put(BloomType.ROWCOL, 0); TYPE_COUNT.put(BloomType.ROW, 0); @@ -73,7 +73,7 @@ public class TestScannerSelectionUsingKeyRange { @Parameters public static Collection parameters() { - List params = new ArrayList(); + List params = new ArrayList<>(); for (Object type : TYPE_COUNT.keySet()) { params.add(new Object[] { type, TYPE_COUNT.get(type) }); } @@ -120,7 +120,7 @@ public class TestScannerSelectionUsingKeyRange { LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache(); cache.clearCache(); InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results)) { } scanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 08b259d7c13..4af48ce92d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -79,7 +79,7 @@ public class TestScannerSelectionUsingTTL { @Parameters public static Collection parameters() { - List params = new ArrayList(); + List params = new ArrayList<>(); for (int 
numFreshFiles = 1; numFreshFiles <= 3; ++numFreshFiles) { for (boolean explicitCompaction : new boolean[] { false, true }) { params.add(new Object[] { numFreshFiles, explicitCompaction }); @@ -135,7 +135,7 @@ public class TestScannerSelectionUsingTTL { LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache(); cache.clearCache(); InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW; int numReturnedRows = 0; LOG.info("Scanning the entire table"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index a8fe3f0f415..d654bce13ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -65,7 +65,7 @@ public class TestSeekTo { private final DataBlockEncoding encoding; @Parameters public static Collection parameters() { - List paramList = new ArrayList(); + List paramList = new ArrayList<>(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { paramList.add(new Object[] { encoding }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 6fe352da504..0f16bfa5e94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -143,10 +143,10 @@ public class TestBucketCache { final List BLOCKSIZES = Arrays.asList(4 * 1024, 8 * 1024, 64 * 1024, 96 * 1024); boolean full = false; - ArrayList allocations = new ArrayList(); + ArrayList allocations = new ArrayList<>(); // Fill the allocated extents by choosing a random blocksize. Continues selecting blocks until // the cache is completely filled. 
- List tmp = new ArrayList(BLOCKSIZES); + List tmp = new ArrayList<>(BLOCKSIZES); while (!full) { Integer blockSize = null; try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java index 4d3f5506fa4..cfba69a768d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java @@ -169,7 +169,7 @@ public class TestBucketWriterThread { private static void doDrainOfOneEntry(final BucketCache bc, final BucketCache.WriterThread wt, final BlockingQueue q) throws InterruptedException { - List rqes = BucketCache.getRAMQueueEntries(q, new ArrayList(1)); + List rqes = BucketCache.getRAMQueueEntries(q, new ArrayList<>(1)); wt.doDrain(rqes); assertTrue(q.isEmpty()); assertTrue(bc.ramCache.isEmpty()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index 3535d232437..04ac5191eca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -216,7 +216,7 @@ public class TestSimpleRpcScheduler {/* when(priority.getDeadline(eq(largeHead), any(Message.class))).thenReturn(50L); when(priority.getDeadline(eq(hugeHead), any(Message.class))).thenReturn(100L); - final ArrayList work = new ArrayList(); + final ArrayList work = new ArrayList<>(); doAnswerTaskExecution(smallCallTask, work, 10, 250); doAnswerTaskExecution(largeCallTask, work, 50, 250); doAnswerTaskExecution(hugeCallTask, work, 100, 250); @@ -312,7 +312,7 @@ public class TestSimpleRpcScheduler {/* when(scanCall.getHeader()).thenReturn(scanHead); when(scanCall.getParam()).thenReturn(scanCall.param); - ArrayList work = new ArrayList(); + ArrayList work = new ArrayList<>(); doAnswerTaskExecution(putCallTask, work, 1, 1000); doAnswerTaskExecution(getCallTask, work, 2, 1000); doAnswerTaskExecution(scanCallTask, work, 3, 1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index fd0db6a563c..22dda357846 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -218,7 +218,7 @@ public class TestTableMapReduceUtil { OutputCollector output, Reporter reporter) throws IOException { String strKey = Bytes.toString(key.get()); - List result = new ArrayList(); + List result = new ArrayList<>(); while (values.hasNext()) result.add(values.next()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index ca727e4b052..47421f141d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -234,7 +234,7 @@ public abstract class MultiTableInputFormatTestBase { c.set(KEY_STARTROW, start != null ? start : ""); c.set(KEY_LASTROW, last != null ? 
last : ""); - List scans = new ArrayList(); + List scans = new ArrayList<>(); for (String tableName : TABLES) { Scan scan = new Scan(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java index 92888ed5e0f..efacca91ba0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java @@ -46,15 +46,14 @@ public class NMapInputFormat extends InputFormat { public RecordReader createRecordReader( InputSplit split, TaskAttemptContext tac) throws IOException, InterruptedException { - return new SingleRecordReader( - NullWritable.get(), NullWritable.get()); + return new SingleRecordReader<>(NullWritable.get(), NullWritable.get()); } @Override public List getSplits(JobContext context) throws IOException, InterruptedException { int count = getNumMapTasks(context.getConfiguration()); - List splits = new ArrayList(count); + List splits = new ArrayList<>(count); for (int i = 0; i < count; i++) { splits.add(new NullInputSplit()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java index fc7b1020424..b7fdb47ccc5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java @@ -51,7 +51,7 @@ public class TestGroupingTableMapper { Mapper.Context context = mock(Mapper.Context.class); context.write(any(ImmutableBytesWritable.class), any(Result.class)); - List keyValue = new ArrayList(); + List keyValue = new ArrayList<>(); byte[] row = {}; keyValue.add(new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes .toBytes("value1"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 52b2901310f..3c1bed8b675 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -448,7 +448,7 @@ public class TestHFileOutputFormat2 { writer = hof.getRecordWriter(context); final byte [] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList(); + List< Tag > tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -735,8 +735,7 @@ public class TestHFileOutputFormat2 { */ private Map getMockColumnFamiliesForCompression (int numCfs) { - Map familyToCompression - = new HashMap(); + Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO); @@ -809,8 +808,7 @@ public class TestHFileOutputFormat2 { */ private Map getMockColumnFamiliesForBloomType (int numCfs) { - Map familyToBloomType = - new HashMap(); + Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); @@ -881,8 +879,7 @@ public class 
TestHFileOutputFormat2 { */ private Map getMockColumnFamiliesForBlockSize (int numCfs) { - Map familyToBlockSize = - new HashMap(); + Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); @@ -956,8 +953,7 @@ public class TestHFileOutputFormat2 { */ private Map getMockColumnFamiliesForDataBlockEncoding (int numCfs) { - Map familyToDataBlockEncoding = - new HashMap(); + Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index abb600d223a..2867f130bf1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -59,7 +59,7 @@ public class TestHRegionPartitioner { UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, Bytes.toBytes("aa"), Bytes.toBytes("cc"), 3); - HRegionPartitioner partitioner = new HRegionPartitioner(); + HRegionPartitioner partitioner = new HRegionPartitioner<>(); Configuration configuration = UTIL.getConfiguration(); configuration.set(TableOutputFormat.OUTPUT_TABLE, name.getMethodName()); partitioner.setConf(configuration); @@ -68,4 +68,4 @@ public class TestHRegionPartitioner { assertEquals(1, partitioner.getPartition(writable, 10L, 3)); assertEquals(0, partitioner.getPartition(writable, 10L, 1)); } -} \ No newline at end of file +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index 75d40a18620..a7642af0b5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -146,8 +146,7 @@ public class TestHashTable { .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) .build(); - Map actualHashes - = new HashMap(); + Map actualHashes = new HashMap<>(); Path dataDir = new Path(testDir, HashTable.HASH_DATA_DIR); for (int i = 0; i < numHashFiles; i++) { Path hashPath = new Path(dataDir, HashTable.TableHash.getDataFileName(i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 7de012e4238..1866a35e228 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -628,7 +628,7 @@ public class TestImportExport { public void testAddFilterAndArguments() throws IOException { Configuration configuration = new Configuration(); - List args = new ArrayList(); + List args = new ArrayList<>(); args.add("param1"); args.add("param2"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 26f8deabfa1..6d9b05b3937 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -176,7 +176,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable { } // run the import - List argv = new ArrayList(Arrays.asList(args)); + List argv = new ArrayList<>(Arrays.asList(args)); argv.add(inputPath.toString()); Tool tool = new ImportTsv(); LOG.debug("Running ImportTsv with arguments: " + argv); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index 21cae54d79b..4ab3d29a9fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -138,7 +138,7 @@ public class TestImportTSVWithTTLs implements Configurable { } // run the import - List argv = new ArrayList(Arrays.asList(args)); + List argv = new ArrayList<>(Arrays.asList(args)); argv.add(inputPath.toString()); Tool tool = new ImportTsv(); LOG.debug("Running ImportTsv with arguments: " + argv); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index 50d6b1818ec..b8d973be764 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -357,7 +357,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { } // run the import - List argv = new ArrayList(Arrays.asList(args)); + List argv = new ArrayList<>(Arrays.asList(args)); argv.add(inputPath.toString()); Tool tool = new ImportTsv(); LOG.debug("Running ImportTsv with arguments: " + argv); @@ -397,9 +397,9 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { // validate number and content of output columns LOG.debug("Validating HFiles."); - Set configFamilies = new HashSet(); + Set configFamilies = new HashSet<>(); configFamilies.add(family); - Set foundFamilies = new HashSet(); + Set foundFamilies = new HashSet<>(); int actualKVCount = 0; for (FileStatus cfStatus : fs.listStatus(new Path(outputPath), new OutputFilesFilter())) { LOG.debug("The output path has files"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index fd51544d6b9..b7d5c6f539c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -114,7 +114,7 @@ public class TestImportTsv implements Configurable { @Before public void setup() throws Exception { tn = TableName.valueOf("test-" + UUID.randomUUID()); - args = new HashMap(); + args = new HashMap<>(); // Prepare the arguments required for the test. 
args.put(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,FAM:A,FAM:B"); args.put(ImportTsv.SEPARATOR_CONF_KEY, "\u001b"); @@ -515,9 +515,9 @@ public class TestImportTsv implements Configurable { int expectedKVCount) throws IOException { // validate number and content of output columns LOG.debug("Validating HFiles."); - Set configFamilies = new HashSet(); + Set configFamilies = new HashSet<>(); configFamilies.add(family); - Set foundFamilies = new HashSet(); + Set foundFamilies = new HashSet<>(); int actualKVCount = 0; for (FileStatus cfStatus : fs.listStatus(new Path(outputPath), new OutputFilesFilter())) { String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java index 81e0a700748..f5694466647 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java @@ -52,7 +52,7 @@ public class TestImportTsvParser { } private void checkParsing(ParsedLine parsed, Iterable expected) { - ArrayList parsedCols = new ArrayList(); + ArrayList parsedCols = new ArrayList<>(); for (int i = 0; i < parsed.getColumnCount(); i++) { parsedCols.add(Bytes.toString(parsed.getLineBytes(), parsed.getColumnOffset(i), parsed.getColumnLength(i))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 7167c199b59..a6dacf789d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -323,7 +323,7 @@ public class TestLoadIncrementalHFiles { list = new ArrayList<>(); } if (useMap) { - map = new TreeMap>(Bytes.BYTES_COMPARATOR); + map = new TreeMap<>(Bytes.BYTES_COMPARATOR); map.put(FAMILY, list); } Path last = null; @@ -630,7 +630,7 @@ public class TestLoadIncrementalHFiles { @Test(timeout = 120000) public void testInferBoundaries() { - TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); /* Toy example * c---------i o------p s---------t v------x diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java index 738ae5ff9b8..958ed83f6ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java @@ -169,7 +169,7 @@ public class TestMultiHFileOutputFormat { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; - ArrayList tables = new ArrayList(); + ArrayList tables = new ArrayList<>(); for (int i = 0; i < TABLES.length; i++) { tables.add(new ImmutableBytesWritable(TABLES[i])); } @@ -204,7 +204,7 @@ public class TestMultiHFileOutputFormat { protected void reduce(ImmutableBytesWritable table, java.lang.Iterable kvs, org.apache.hadoop.mapreduce.Reducer.Context context) throws java.io.IOException, InterruptedException { - TreeSet map = new TreeSet(KeyValue.COMPARATOR); + TreeSet map = new TreeSet<>(KeyValue.COMPARATOR); for (KeyValue kv : kvs) { try { 
map.add(kv.clone()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index cd831998b8f..3b84e2d2d34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -319,7 +319,7 @@ public class TestRowCounter { final byte[] col1 = Bytes.toBytes(COL1); final byte[] col2 = Bytes.toBytes(COL2); final byte[] col3 = Bytes.toBytes(COMPOSITE_COLUMN); - ArrayList rowsUpdate = new ArrayList(); + ArrayList rowsUpdate = new ArrayList<>(); // write few rows with two columns int i = 0; for (; i < totalRows - rowsWithOneCol; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index 119df800fce..0f41f336777 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -42,8 +42,7 @@ public class TestSimpleTotalOrderPartitioner { public void testSplit() throws Exception { String start = "a"; String end = "{"; - SimpleTotalOrderPartitioner p = - new SimpleTotalOrderPartitioner(); + SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); this.conf.set(SimpleTotalOrderPartitioner.START, start); this.conf.set(SimpleTotalOrderPartitioner.END, end); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index f1cda3c32b2..4382c9c6bc1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -48,7 +48,7 @@ public class TestTableSplit { "row-end".getBytes(), "location"); assertEquals (split1, split2); assertTrue (split1.hashCode() == split2.hashCode()); - HashSet set = new HashSet(2); + HashSet set = new HashSet<>(2); set.add(split1); set.add(split2); assertTrue(set.size() == 1); @@ -68,7 +68,7 @@ public class TestTableSplit { assertEquals (split1, split2); assertTrue (split1.hashCode() == split2.hashCode()); - HashSet set = new HashSet(2); + HashSet set = new HashSet<>(2); set.add(split1); set.add(split2); assertTrue(set.size() == 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 8b7cdd72610..6796c944adc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -68,8 +68,7 @@ public class TestTimeRangeMapRed { private Admin admin; private static final byte [] KEY = Bytes.toBytes("row1"); - private static final NavigableMap TIMESTAMP = - new TreeMap(); + private static final NavigableMap TIMESTAMP = new TreeMap<>(); static { TIMESTAMP.put((long)1245620000, false); TIMESTAMP.put((long)1245620005, true); // include @@ -112,7 +111,7 @@ public class TestTimeRangeMapRed { public void map(ImmutableBytesWritable key, Result result, Context context) throws IOException { - List tsList = new ArrayList(); + List tsList = new 
ArrayList<>(); for (Cell kv : result.listCells()) { tsList.add(kv.getTimestamp()); } @@ -152,7 +151,7 @@ public class TestTimeRangeMapRed { col.setMaxVersions(Integer.MAX_VALUE); desc.addFamily(col); admin.createTable(desc); - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (Map.Entry entry : TIMESTAMP.entrySet()) { Put put = new Put(KEY); put.setDurability(Durability.SKIP_WAL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 7e142bce86a..427c5cc3ebe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -172,7 +172,7 @@ public class TestWALPlayer { when(context.getConfiguration()).thenReturn(configuration); WALEdit value = mock(WALEdit.class); - ArrayList values = new ArrayList(); + ArrayList values = new ArrayList<>(); KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"), null); values.add(kv1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index fa1b9f4d0ab..34725b4dce2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -81,8 +81,7 @@ public class TestWALRecordReader { private static HTableDescriptor htd; private static Path logDir; protected MultiVersionConcurrencyControl mvcc; - protected static NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + protected static NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); private static String getName() { return "TestWALRecordReader"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 467d4a5836f..a5fe9528547 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -143,14 +143,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { * key, need to use TreeMap and provide a Comparator. Use * {@link #setGetResult(byte[], byte[], Result)} filling this map. */ - private final Map> gets = - new TreeMap>(Bytes.BYTES_COMPARATOR); + private final Map> gets = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Map of regions to results to return when scanning. */ - private final Map nexts = - new TreeMap(Bytes.BYTES_COMPARATOR); + private final Map nexts = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Data structure that holds regionname and index used scanning. @@ -177,8 +175,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { /** * Outstanding scanners and their offset into nexts */ - private final Map scannersAndOffsets = - new HashMap(); + private final Map scannersAndOffsets = new HashMap<>(); /** * @param sn Name of this mock regionserver @@ -203,7 +200,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { if (value == null) { // If no value already, create one. Needs to be treemap because we are // using byte array as key. Not thread safe. 
- value = new TreeMap(Bytes.BYTES_COMPARATOR); + value = new TreeMap<>(Bytes.BYTES_COMPARATOR); this.gets.put(regionName, value); } value.put(row, r); @@ -402,7 +399,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { Result result = next(scannerId); if (result != null) { builder.addCellsPerResult(result.size()); - List results = new ArrayList(1); + List results = new ArrayList<>(1); results.add(result); ((HBaseRpcController) controller).setCellScanner(CellUtil .createCellScanner(results)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index 5100a2bd409..78b75d586eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -333,14 +333,14 @@ public class TestAssignmentListener { // We'll start with 2 servers in draining that existed before the // HMaster started. - ArrayList drainingServers = new ArrayList(); + ArrayList drainingServers = new ArrayList<>(); drainingServers.add(SERVERNAME_A); drainingServers.add(SERVERNAME_B); // We'll have 2 servers that come online AFTER the DrainingServerTracker // is started (just as we see when we failover to the Backup HMaster). // One of these will already be a draining server. - HashMap onlineServers = new HashMap(); + HashMap onlineServers = new HashMap<>(); onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD); onlineServers.put(SERVERNAME_C, ServerLoad.EMPTY_SERVERLOAD); @@ -370,7 +370,7 @@ public class TestAssignmentListener { new ArrayList()); // checkAndRecordNewServer() is how servers are added to the ServerManager. - ArrayList onlineDrainingServers = new ArrayList(); + ArrayList onlineDrainingServers = new ArrayList<>(); for (ServerName sn : onlineServers.keySet()){ // Here's the actual test. 
serverManager.checkAndRecordNewServer(sn, onlineServers.get(sn)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 242b0120d55..449e1e6ab08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -624,7 +624,7 @@ public class TestAssignmentManagerOnCluster { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); AssignmentManager am = master.getAssignmentManager(); - Map regions = new HashMap(); + Map regions = new HashMap<>(); ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); regions.put(hri, dest); // retainAssignment but balancer cannot find a plan @@ -838,7 +838,7 @@ public class TestAssignmentManagerOnCluster { assertNotNull(destServerName); assertFalse("Region should be assigned on a new region server", oldServerName.equals(destServerName)); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(hri); am.assign(destServerName, regions); @@ -1214,8 +1214,8 @@ public class TestAssignmentManagerOnCluster { rss.start(); // Create 10 threads and make each do 10 puts related to region state update Thread[] th = new Thread[10]; - List nameList = new ArrayList(); - List tableNameList = new ArrayList(); + List nameList = new ArrayList<>(); + List tableNameList = new ArrayList<>(); for (int i = 0; i < th.length; i++) { th[i] = new Thread() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 52b58f16233..cc73d9df0c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -340,8 +340,7 @@ public class TestCatalogJanitor { // First test that our Comparator works right up in CatalogJanitor. // Just fo kicks. - SortedMap regions = - new TreeMap(new CatalogJanitor.SplitParentFirstComparator()); + SortedMap regions = new TreeMap<>(new CatalogJanitor.SplitParentFirstComparator()); // Now make sure that this regions map sorts as we expect it to. 
regions.put(parent, createResult(parent, splita, splitb)); regions.put(splitb, createResult(splitb, splitba, splitbb)); @@ -434,16 +433,14 @@ public class TestCatalogJanitor { new byte[0]); Thread.sleep(1001); - final Map splitParents = - new TreeMap(new SplitParentFirstComparator()); + final Map splitParents = new TreeMap<>(new SplitParentFirstComparator()); splitParents.put(parent, createResult(parent, splita, splitb)); splita.setOffline(true); //simulate that splita goes offline when it is split splitParents.put(splita, createResult(splita, splitaa,splitab)); - final Map mergedRegions = new TreeMap(); + final Map mergedRegions = new TreeMap<>(); CatalogJanitor janitor = spy(new CatalogJanitor(services)); - doReturn(new Triple, Map>( - 10, mergedRegions, splitParents)).when(janitor) + doReturn(new Triple<>(10, mergedRegions, splitParents)).when(janitor) .getMergedRegionsAndSplitParents(); //create ref from splita to parent diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java index 5d47edec261..68cab5a0ac4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java @@ -49,7 +49,7 @@ public class TestClusterStatusPublisher { ClusterStatusPublisher csp = new ClusterStatusPublisher() { @Override protected List> getDeadServers(long since) { - return new ArrayList>(); + return new ArrayList<>(); } }; @@ -61,10 +61,10 @@ public class TestClusterStatusPublisher { ClusterStatusPublisher csp = new ClusterStatusPublisher() { @Override protected List> getDeadServers(long since) { - List> res = new ArrayList>(); + List> res = new ArrayList<>(); switch ((int) EnvironmentEdgeManager.currentTime()) { case 2: - res.add(new Pair(ServerName.valueOf("hn", 10, 10), 1L)); + res.add(new Pair<>(ServerName.valueOf("hn", 10, 10), 1L)); break; case 1000: break; @@ -87,9 +87,9 @@ public class TestClusterStatusPublisher { ClusterStatusPublisher csp = new ClusterStatusPublisher() { @Override protected List> getDeadServers(long since) { - List> res = new ArrayList>(); + List> res = new ArrayList<>(); for (int i = 0; i < 25; i++) { - res.add(new Pair(ServerName.valueOf("hn" + i, 10, 10), 20L)); + res.add(new Pair<>(ServerName.valueOf("hn" + i, 10, 10), 20L)); } return res; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index d0b8494aec2..4c8728f5344 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -329,7 +329,7 @@ public class TestDistributedLogSplitting { private final PerClientRandomNonceGenerator delegate = PerClientRandomNonceGenerator.get(); private boolean isDups = false; - private LinkedList nonces = new LinkedList(); + private LinkedList nonces = new LinkedList<>(); public void startDups() { isDups = true; @@ -370,7 +370,7 @@ public class TestDistributedLogSplitting { (ClusterConnection)TEST_UTIL.getConnection(), ng); try { - List reqs = new ArrayList(); + List reqs = new ArrayList<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); List hris = 
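The CatalogJanitor and ClusterStatusPublisher hunks above show where the diamond pays off most: constructors whose type arguments are themselves generic (Pair, Triple, maps of lists). A self-contained sketch using a tiny stand-in Pair class (an illustration only, not HBase's org.apache.hadoop.hbase.util.Pair):

    import java.util.ArrayList;
    import java.util.List;

    public class NestedDiamond {
        // Minimal stand-in for a two-element tuple.
        static class Pair<A, B> {
            final A first;
            final B second;
            Pair(A first, B second) { this.first = first; this.second = second; }
        }

        public static void main(String[] args) {
            // Before: the nested type arguments had to be spelled out twice.
            List<Pair<String, Long>> deadServersOld =
                new ArrayList<Pair<String, Long>>();
            deadServersOld.add(new Pair<String, Long>("hn0", 1L));

            // After: both the list constructor and the element constructor use <>.
            List<Pair<String, Long>> deadServers = new ArrayList<>();
            deadServers.add(new Pair<>("hn1", 20L));

            System.out.println(deadServersOld.size() + deadServers.size());
        }
    }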
ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices()); @@ -693,7 +693,7 @@ public class TestDistributedLogSplitting { try { final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager(); - Set regionSet = new HashSet(); + Set regionSet = new HashSet<>(); HRegionInfo region = null; HRegionServer hrs = null; ServerName firstFailedServer = null; @@ -942,7 +942,7 @@ public class TestDistributedLogSplitting { try { final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager(); - Set regionSet = new HashSet(); + Set regionSet = new HashSet<>(); HRegionInfo region = null; HRegionServer hrs = null; HRegionServer dstRS = null; @@ -1214,10 +1214,10 @@ public class TestDistributedLogSplitting { List regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices()); LOG.info("#regions = " + regions.size()); - Set tmpRegions = new HashSet(); + Set tmpRegions = new HashSet<>(); tmpRegions.add(HRegionInfo.FIRST_META_REGIONINFO); master.getMasterWalManager().prepareLogReplay(hrs.getServerName(), tmpRegions); - Set userRegionSet = new HashSet(); + Set userRegionSet = new HashSet<>(); userRegionSet.addAll(regions); master.getMasterWalManager().prepareLogReplay(hrs.getServerName(), userRegionSet); boolean isMetaRegionInRecovery = false; @@ -1591,7 +1591,7 @@ public class TestDistributedLogSplitting { htd.addFamily(new HColumnDescriptor(family)); byte[] value = new byte[edit_size]; - List hris = new ArrayList(); + List hris = new ArrayList<>(); for (HRegionInfo region : regions) { if (!region.getTable().getNameAsString().equalsIgnoreCase(tname)) { continue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 37e714ea1af..fe0e7b193a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -286,7 +286,7 @@ public class TestMasterNoCluster { // Record a newer server in server manager at first getServerManager().recordNewServerWithLock(newServer, ServerLoad.EMPTY_SERVERLOAD); - List onlineServers = new ArrayList(); + List onlineServers = new ArrayList<>(); onlineServers.add(deadServer); onlineServers.add(newServer); // Mock the region server tracker to pull the dead server from zk diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index bb8a9959159..6c737e912e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -219,7 +219,7 @@ public class TestMasterOperationsForRegionReplicas { //just check that the number of default replica regions in the meta table are the same //as the number of regions the table was created with, and the count of the //replicas is numReplica for each region - Map defaultReplicas = new HashMap(); + Map defaultReplicas = new HashMap<>(); for (HRegionInfo hri : hris) { Integer i; HRegionInfo regionReplica0 = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri); @@ -227,7 +227,7 @@ public class TestMasterOperationsForRegionReplicas { (i = defaultReplicas.get(regionReplica0)) == null ? 
1 : i + 1); } assert(defaultReplicas.size() == numRegions); - Collection counts = new HashSet(defaultReplicas.values()); + Collection counts = new HashSet<>(defaultReplicas.values()); assert(counts.size() == 1 && counts.contains(new Integer(numReplica))); } finally { ADMIN.disableTable(tableName); @@ -248,7 +248,7 @@ public class TestMasterOperationsForRegionReplicas { desc.addFamily(new HColumnDescriptor("family")); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); TEST_UTIL.waitTableEnabled(tableName); - Set tableRows = new HashSet(); + Set tableRows = new HashSet<>(); List hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName); for (HRegionInfo hri : hris) { tableRows.add(hri.getRegionName()); @@ -317,7 +317,7 @@ public class TestMasterOperationsForRegionReplicas { continue; } List regions = entry.getValue(); - Set setOfStartKeys = new HashSet(); + Set setOfStartKeys = new HashSet<>(); for (HRegionInfo region : regions) { byte[] startKey = region.getStartKey(); if (region.getTable().equals(table)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java index af54ffc81ff..b59e6ffcab5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java @@ -90,7 +90,7 @@ public class TestMasterStatusServlet { // Fake AssignmentManager and RIT AssignmentManager am = Mockito.mock(AssignmentManager.class); RegionStates rs = Mockito.mock(RegionStates.class); - Set regionsInTransition = new HashSet(); + Set regionsInTransition = new HashSet<>(); regionsInTransition.add(new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST)); Mockito.doReturn(rs).when(am).getRegionStates(); Mockito.doReturn(regionsInTransition).when(rs).getRegionsInTransition(); @@ -145,7 +145,7 @@ public class TestMasterStatusServlet { List servers = Lists.newArrayList( ServerName.valueOf("rootserver,123,12345"), ServerName.valueOf("metaserver,123,12345")); - Set deadServers = new HashSet( + Set deadServers = new HashSet<>( Lists.newArrayList( ServerName.valueOf("badserver,123,12345"), ServerName.valueOf("uglyserver,123,12345")) @@ -164,8 +164,7 @@ public class TestMasterStatusServlet { RegionStates rs = Mockito.mock(RegionStates.class); // Add 100 regions as in-transition - TreeSet regionsInTransition = new TreeSet( - RegionStates.REGION_STATE_COMPARATOR); + TreeSet regionsInTransition = new TreeSet<>(RegionStates.REGION_STATE_COMPARATOR); for (byte i = 0; i < 100; i++) { HRegionInfo hri = new HRegionInfo(FAKE_TABLE.getTableName(), new byte[]{i}, new byte[]{(byte) (i+1)}); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java index 7c7531f73ac..782c40061a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java @@ -90,7 +90,7 @@ public class TestMasterWalManager { inRecoveringRegionPath = ZKUtil.joinZNode(inRecoveringRegionPath, inRecoveryServerName.getServerName()); ZKUtil.createWithParents(zkw, inRecoveringRegionPath); - Set servers = new HashSet(); + Set servers = new HashSet<>(); servers.add(previouselyFaildServerName); 
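The TestMasterOperationsForRegionReplicas and TestMasterStatusServlet hunks rely on the same inference when the constructor copies another collection: the element type comes from the declared target, and the copied collection only has to be assignment-compatible. A small sketch with JDK types:

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class CopyConstructorDiamond {
        public static void main(String[] args) {
            List<Integer> counts = Arrays.asList(3, 3, 3);

            // Before: new HashSet<Integer>(counts)
            // After: <Integer> is inferred from the declared Collection<Integer>.
            Collection<Integer> distinct = new HashSet<>(counts);

            Set<String> deadServers = new HashSet<>(
                Arrays.asList("badserver,123,12345", "uglyserver,123,12345"));

            System.out.println(distinct.size() + " " + deadServers.size());
        }
    }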
mwm.removeStaleRecoveringRegionsFromZK(servers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index b2be237e13b..67add2f1bf8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -80,8 +80,7 @@ public class TestRegionPlacement { private static Position[] positions = Position.values(); private int lastRegionOnPrimaryRSCount = 0; private int REGION_NUM = 10; - private Map favoredNodesAssignmentPlan = - new HashMap(); + private Map favoredNodesAssignmentPlan = new HashMap<>(); @BeforeClass public static void setupBeforeClass() throws Exception { @@ -204,14 +203,12 @@ public class TestRegionPlacement { } while (ServerName.isSameHostnameAndPort(metaServer, serverToKill) || isNamespaceServer || TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getNumberOfOnlineRegions() == 0); LOG.debug("Stopping RS " + serverToKill); - Map> regionsToVerify = - new HashMap>(); + Map> regionsToVerify = new HashMap<>(); // mark the regions to track for (Map.Entry entry : favoredNodesAssignmentPlan.entrySet()) { ServerName s = entry.getValue()[0]; if (ServerName.isSameHostnameAndPort(s, serverToKill)) { - regionsToVerify.put(entry.getKey(), new Pair( - entry.getValue()[1], entry.getValue()[2])); + regionsToVerify.put(entry.getKey(), new Pair<>(entry.getValue()[1], entry.getValue()[2])); LOG.debug("Adding " + entry.getKey() + " with sedcondary/tertiary " + entry.getValue()[1] + " " + entry.getValue()[2]); } @@ -308,7 +305,7 @@ public class TestRegionPlacement { plan.getAssignmentMap().entrySet()) { // copy the server list from the original plan - List shuffledServerList = new ArrayList(); + List shuffledServerList = new ArrayList<>(); shuffledServerList.addAll(entry.getValue()); // start to shuffle diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java index 7c6f08b2806..f10c3687e9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java @@ -79,12 +79,12 @@ public class TestRegionPlacement2 { LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); balancer.initialize(); - List servers = new ArrayList(); + List servers = new ArrayList<>(); for (int i = 0; i < SLAVES; i++) { ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName(); servers.add(server); } - List regions = new ArrayList(1); + List regions = new ArrayList<>(1); HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName())); regions.add(region); Map> assignmentMap = balancer.roundRobinAssignment(regions, @@ -140,12 +140,12 @@ public class TestRegionPlacement2 { LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); balancer.initialize(); - List servers = new ArrayList(); + List servers = new ArrayList<>(); for (int i = 0; i < SLAVES; i++) { ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName(); servers.add(server); } - List regions = new 
ArrayList(1); + List regions = new ArrayList<>(1); HRegionInfo region = new HRegionInfo(TableName.valueOf(name.getMethodName())); regions.add(region); ServerName serverBefore = balancer.randomAssignment(region, servers); @@ -183,7 +183,7 @@ public class TestRegionPlacement2 { private List removeMatchingServers(Collection serversWithoutStartCode, List servers) { - List serversToRemove = new ArrayList(); + List serversToRemove = new ArrayList<>(); for (ServerName s : serversWithoutStartCode) { serversToRemove.addAll(removeMatchingServers(s, servers)); } @@ -192,7 +192,7 @@ public class TestRegionPlacement2 { private List removeMatchingServers(ServerName serverWithoutStartCode, List servers) { - List serversToRemove = new ArrayList(); + List serversToRemove = new ArrayList<>(); for (ServerName s : servers) { if (ServerName.isSameHostnameAndPort(s, serverWithoutStartCode)) { serversToRemove.add(s); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java index ac99b290068..80c6f3a875c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java @@ -259,8 +259,8 @@ public class TestRollingRestart { private NavigableSet getDoubleAssignedRegions( MiniHBaseCluster cluster) throws IOException { - NavigableSet online = new TreeSet(); - NavigableSet doubled = new TreeSet(); + NavigableSet online = new TreeSet<>(); + NavigableSet doubled = new TreeSet<>(); for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { for (HRegionInfo region : ProtobufUtil.getOnlineRegions( rst.getRegionServer().getRSRpcServices())) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index 0f427add076..f93449c57ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -259,7 +259,7 @@ public class BalancerTestBase { } public List resolve(List names) { - List ret = new ArrayList(names.size()); + List ret = new ArrayList<>(names.size()); for (String name : names) { ret.add("rack"); } @@ -345,14 +345,14 @@ public class BalancerTestBase { * Checks whether region replicas are not hosted on the same host. 
*/ public void assertRegionReplicaPlacement(Map> serverMap, RackManager rackManager) { - TreeMap> regionsPerHost = new TreeMap>(); - TreeMap> regionsPerRack = new TreeMap>(); + TreeMap> regionsPerHost = new TreeMap<>(); + TreeMap> regionsPerRack = new TreeMap<>(); for (Entry> entry : serverMap.entrySet()) { String hostname = entry.getKey().getHostname(); Set infos = regionsPerHost.get(hostname); if (infos == null) { - infos = new HashSet(); + infos = new HashSet<>(); regionsPerHost.put(hostname, infos); } @@ -372,7 +372,7 @@ public class BalancerTestBase { String rack = rackManager.getRack(entry.getKey()); Set infos = regionsPerRack.get(rack); if (infos == null) { - infos = new HashSet(); + infos = new HashSet<>(); regionsPerRack.put(rack, infos); } @@ -399,7 +399,7 @@ public class BalancerTestBase { } protected List convertToList(final Map> servers) { - List list = new ArrayList(servers.size()); + List list = new ArrayList<>(servers.size()); for (Map.Entry> e : servers.entrySet()) { list.add(new ServerAndLoad(e.getKey(), e.getValue().size())); } @@ -407,7 +407,7 @@ public class BalancerTestBase { } protected String printMock(List balancedCluster) { - SortedSet sorted = new TreeSet(balancedCluster); + SortedSet sorted = new TreeSet<>(balancedCluster); ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]); StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4); sb.append("{ "); @@ -434,9 +434,9 @@ public class BalancerTestBase { protected List reconcile(List list, List plans, Map> servers) { - List result = new ArrayList(list.size()); + List result = new ArrayList<>(list.size()); - Map map = new HashMap(list.size()); + Map map = new HashMap<>(list.size()); for (ServerAndLoad sl : list) { map.put(sl.getServerName(), sl); } @@ -477,7 +477,7 @@ public class BalancerTestBase { protected TreeMap> mockClusterServers(int[] mockCluster, int numTables) { int numServers = mockCluster.length; - TreeMap> servers = new TreeMap>(); + TreeMap> servers = new TreeMap<>(); for (int i = 0; i < numServers; i++) { int numRegions = mockCluster[i]; ServerAndLoad sal = randomServer(0); @@ -489,7 +489,7 @@ public class BalancerTestBase { protected TreeMap> mockUniformClusterServers(int[] mockCluster) { int numServers = mockCluster.length; - TreeMap> servers = new TreeMap>(); + TreeMap> servers = new TreeMap<>(); for (int i = 0; i < numServers; i++) { int numRegions = mockCluster[i]; ServerAndLoad sal = randomServer(0); @@ -507,12 +507,12 @@ public class BalancerTestBase { for (HRegionInfo hri : regions){ TreeMap> servers = result.get(hri.getTable()); if (servers == null) { - servers = new TreeMap>(); + servers = new TreeMap<>(); result.put(hri.getTable(), servers); } List hrilist = servers.get(sal); if (hrilist == null) { - hrilist = new ArrayList(); + hrilist = new ArrayList<>(); servers.put(sal, hrilist); } hrilist.add(hri); @@ -520,20 +520,20 @@ public class BalancerTestBase { } for(Map.Entry>> entry : result.entrySet()){ for(ServerName srn : clusterServers.keySet()){ - if (!entry.getValue().containsKey(srn)) entry.getValue().put(srn, new ArrayList()); + if (!entry.getValue().containsKey(srn)) entry.getValue().put(srn, new ArrayList<>()); } } return result; } - private Queue regionQueue = new LinkedList(); + private Queue regionQueue = new LinkedList<>(); protected List randomRegions(int numRegions) { return randomRegions(numRegions, -1); } protected List randomRegions(int numRegions, int numTables) { - List regions = new ArrayList(numRegions); + List regions = new ArrayList<>(numRegions); 
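One pattern in the BalancerTestBase hunk above is slightly different: new ArrayList<>() is passed directly as a method argument rather than assigned to a variable. With Java 8 target typing the diamond is inferred from the formal parameter type of the call (on a Java 7 compiler this particular form may not infer the intended type, so it is an assumption here that the build targets Java 8). A sketch with plain JDK types and my own variable names:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondAsArgument {
        public static void main(String[] args) {
            Map<String, List<Integer>> regionsPerServer = new HashMap<>();

            // The element type of the new ArrayList is inferred from the map's
            // value type (List<Integer>), because put(...) supplies the target type.
            regionsPerServer.put("server-1", new ArrayList<>());

            regionsPerServer.get("server-1").add(7);
            System.out.println(regionsPerServer);
        }
    }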
byte[] start = new byte[16]; byte[] end = new byte[16]; rand.nextBytes(start); @@ -554,7 +554,7 @@ public class BalancerTestBase { } protected List uniformRegions(int numRegions) { - List regions = new ArrayList(numRegions); + List regions = new ArrayList<>(numRegions); byte[] start = new byte[16]; byte[] end = new byte[16]; rand.nextBytes(start); @@ -574,7 +574,7 @@ public class BalancerTestBase { regionQueue.addAll(regions); } - private Queue serverQueue = new LinkedList(); + private Queue serverQueue = new LinkedList<>(); protected ServerAndLoad randomServer(final int numRegionsPerServer) { if (!this.serverQueue.isEmpty()) { @@ -589,7 +589,7 @@ public class BalancerTestBase { } protected List randomServers(int numServers, int numRegionsPerServer) { - List servers = new ArrayList(numServers); + List servers = new ArrayList<>(numServers); for (int i = 0; i < numServers; i++) { servers.add(randomServer(numRegionsPerServer)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index 02032fd169d..751adc56374 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -185,7 +185,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { // Test simple case where all same servers are there List servers = randomServers(10, 10); List regions = randomRegions(100); - Map existing = new TreeMap(); + Map existing = new TreeMap<>(); for (int i = 0; i < regions.size(); i++) { ServerName sn = servers.get(i % servers.size()).getServerName(); // The old server would have had same host and port, but different @@ -200,7 +200,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { assertRetainedAssignment(existing, listOfServerNames, assignment); // Include two new servers that were not there before - List servers2 = new ArrayList(servers); + List servers2 = new ArrayList<>(servers); servers2.add(randomServer(10)); servers2.add(randomServer(10)); listOfServerNames = getListOfServerNames(servers2); @@ -208,7 +208,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { assertRetainedAssignment(existing, listOfServerNames, assignment); // Remove two of the servers that were previously there - List servers3 = new ArrayList(servers); + List servers3 = new ArrayList<>(servers); servers3.remove(0); servers3.remove(0); listOfServerNames = getListOfServerNames(servers3); @@ -266,9 +266,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase { // replica from one node to a specific other node or rack lowers the // availability of the region or not - List list0 = new ArrayList(); - List list1 = new ArrayList(); - List list2 = new ArrayList(); + List list0 = new ArrayList<>(); + List list1 = new ArrayList<>(); + List list2 = new ArrayList<>(); // create a region (region1) HRegionInfo hri1 = new HRegionInfo( TableName.valueOf(name.getMethodName()), "key1".getBytes(), "key2".getBytes(), @@ -282,8 +282,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { list0.add(hri1); //only region1 list1.add(hri2); //only replica_of_region1 list2.add(hri3); //only region2 - Map> clusterState = - new LinkedHashMap>(); + Map> clusterState = new LinkedHashMap<>(); clusterState.put(servers[0], list0); //servers[0] hosts region1 clusterState.put(servers[1], list1); //servers[1] hosts 
replica_of_region1 clusterState.put(servers[2], list2); //servers[2] hosts region2 @@ -318,7 +317,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 and replica_of_region2 clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[10], new ArrayList()); //servers[10], rack3 hosts no region + clusterState.put(servers[10], new ArrayList<>()); //servers[10], rack3 hosts no region // create a cluster with the above clusterState cluster = new Cluster(clusterState, null, null, rackManager); // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would @@ -335,9 +334,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase { @Test (timeout=180000) public void testRegionAvailabilityWithRegionMoves() throws Exception { - List list0 = new ArrayList(); - List list1 = new ArrayList(); - List list2 = new ArrayList(); + List list0 = new ArrayList<>(); + List list1 = new ArrayList<>(); + List list2 = new ArrayList<>(); // create a region (region1) HRegionInfo hri1 = new HRegionInfo( TableName.valueOf(name.getMethodName()), "key1".getBytes(), "key2".getBytes(), @@ -351,8 +350,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { list0.add(hri1); //only region1 list1.add(hri2); //only replica_of_region1 list2.add(hri3); //only region2 - Map> clusterState = - new LinkedHashMap>(); + Map> clusterState = new LinkedHashMap<>(); clusterState.put(servers[0], list0); //servers[0] hosts region1 clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 clusterState.put(servers[2], list2); //servers[2] hosts region2 @@ -374,7 +372,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { // start over again clusterState.clear(); - List list3 = new ArrayList(); + List list3 = new ArrayList<>(); HRegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1); list3.add(hri4); clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 @@ -394,7 +392,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { } private List getListOfServerNames(final List sals) { - List list = new ArrayList(); + List list = new ArrayList<>(); for (ServerAndLoad e : sals) { list.add(e.getServerName()); } @@ -417,8 +415,8 @@ public class TestBaseLoadBalancer extends BalancerTestBase { private void assertRetainedAssignment(Map existing, List servers, Map> assignment) { // Verify condition 1, every region assigned, and to online server - Set onlineServerSet = new TreeSet(servers); - Set assignedRegions = new TreeSet(); + Set onlineServerSet = new TreeSet<>(servers); + Set assignedRegions = new TreeSet<>(); for (Map.Entry> a : assignment.entrySet()) { assertTrue("Region assigned to server that was not listed as online", onlineServerSet.contains(a.getKey())); @@ -428,7 +426,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { assertEquals(existing.size(), assignedRegions.size()); // Verify condition 2, if server had existing assignment, must have same - Set onlineHostNames = new TreeSet(); + Set onlineHostNames = new TreeSet<>(); for (ServerName s : servers) { onlineHostNames.add(s.getHostname()); } @@ -453,12 +451,12 @@ public class TestBaseLoadBalancer extends BalancerTestBase { // sharing same host and port List servers = getListOfServerNames(randomServers(10, 10)); List regions = randomRegions(101); - Map> clusterState = new 
HashMap>(); + Map> clusterState = new HashMap<>(); assignRegions(regions, servers, clusterState); // construct another list of servers, but sharing same hosts and ports - List oldServers = new ArrayList(servers.size()); + List oldServers = new ArrayList<>(servers.size()); for (ServerName sn : servers) { // The old server would have had same host and port, but different start code! oldServers.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10)); @@ -479,7 +477,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { ServerName sn = servers.get(i % servers.size()); List regionsOfServer = clusterState.get(sn); if (regionsOfServer == null) { - regionsOfServer = new ArrayList(10); + regionsOfServer = new ArrayList<>(10); clusterState.put(sn, regionsOfServer); } @@ -492,7 +490,7 @@ public class TestBaseLoadBalancer extends BalancerTestBase { // tests whether region locations are handled correctly in Cluster List servers = getListOfServerNames(randomServers(10, 10)); List regions = randomRegions(101); - Map> clusterState = new HashMap>(); + Map> clusterState = new HashMap<>(); assignRegions(regions, servers, clusterState); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java index 962daf7c221..610ecf7e3b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java @@ -129,16 +129,15 @@ public class TestDefaultLoadBalancer extends BalancerTestBase { */ @Test (timeout=60000) public void testBalanceClusterOverall() throws Exception { - Map>> clusterLoad - = new TreeMap>>(); + Map>> clusterLoad = new TreeMap<>(); for (int[] mockCluster : clusterStateMocks) { Map> clusterServers = mockClusterServers(mockCluster, 50); List clusterList = convertToList(clusterServers); clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers); HashMap>> result = mockClusterServersWithTables(clusterServers); loadBalancer.setClusterLoad(clusterLoad); - List clusterplans = new ArrayList(); - List> regionAmountList = new ArrayList>(); + List clusterplans = new ArrayList<>(); + List> regionAmountList = new ArrayList<>(); for(TreeMap> servers : result.values()){ List list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); @@ -168,8 +167,7 @@ public class TestDefaultLoadBalancer extends BalancerTestBase { */ @Test (timeout=60000) public void testImpactOfBalanceClusterOverall() throws Exception { - Map>> clusterLoad - = new TreeMap>>(); + Map>> clusterLoad = new TreeMap<>(); Map> clusterServers = mockUniformClusterServers(mockUniformCluster); List clusterList = convertToList(clusterServers); clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java index f18d7227b43..365059c01b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java @@ -101,7 +101,7 @@ public class TestRegionLocationFinder { @Test public void testMapHostNameToServerName() throws Exception { 
- List topHosts = new ArrayList(); + List topHosts = new ArrayList<>(); for (int i = 0; i < ServerNum; i++) { HRegionServer server = cluster.getRegionServer(i); String serverHost = server.getServerName().getHostname(); @@ -151,7 +151,7 @@ public class TestRegionLocationFinder { if (regions.size() <= 0) { continue; } - List regionInfos = new ArrayList(regions.size()); + List regionInfos = new ArrayList<>(regions.size()); for (Region region : regions) { regionInfos.add(region.getRegionInfo()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index 368f4facc3f..fee98c991b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -73,8 +73,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { RegionLoad rl = mock(RegionLoad.class); when(rl.getStorefileSizeMB()).thenReturn(i); - Map regionLoadMap = - new TreeMap(Bytes.BYTES_COMPARATOR); + Map regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); regionLoadMap.put(Bytes.toBytes(REGION_KEY), rl); when(sl.getRegionsLoad()).thenReturn(regionLoadMap); @@ -341,7 +340,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { ServerName deadSn = ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 100); - serverMap.put(deadSn, new ArrayList(0)); + serverMap.put(deadSn, new ArrayList<>(0)); plans = loadBalancer.balanceCluster(serverMap); assertNull(plans); @@ -443,7 +442,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { List regions = randomRegions(1); ServerName s1 = ServerName.valueOf("host1", 1000, 11111); ServerName s2 = ServerName.valueOf("host11", 1000, 11111); - Map> map = new HashMap>(); + Map> map = new HashMap<>(); map.put(s1, regions); regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1)); // until the step above s1 holds two replicas of a region @@ -454,7 +453,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { // and both the replicas are on the same rack map.clear(); regions = randomRegions(1); - List regionsOnS2 = new ArrayList(1); + List regionsOnS2 = new ArrayList<>(1); regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1)); map.put(s1, regions); map.put(s2, regionsOnS2); @@ -569,12 +568,12 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { int numNodesPerHost = 4; // create a new map with 4 RS per host. 
- Map> newServerMap = new TreeMap>(serverMap); + Map> newServerMap = new TreeMap<>(serverMap); for (Map.Entry> entry : serverMap.entrySet()) { for (int i=1; i < numNodesPerHost; i++) { ServerName s1 = entry.getKey(); ServerName s2 = ServerName.valueOf(s1.getHostname(), s1.getPort() + i, 1); // create an RS for the same host - newServerMap.put(s2, new ArrayList()); + newServerMap.put(s2, new ArrayList<>()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index b6b5492a2e6..3467f08d697 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -176,7 +176,7 @@ public class TestLogsCleaner { rqc.set(cleaner, rqcMock); // This should return eventually when cversion stabilizes - cleaner.getDeletableFiles(new LinkedList()); + cleaner.getDeletableFiles(new LinkedList<>()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index 817cfb49b41..6df05c00299 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -140,7 +140,7 @@ public class TestReplicationHFileCleaner { cleaner.isFileDeletable(fs.getFileStatus(file))); List> files = new ArrayList<>(1); - files.add(new Pair(null, file)); + files.add(new Pair<>(null, file)); // 4. Add the file to hfile-refs queue rq.addHFileRefs(peerId, files); // 5. Assert file should not be deletable @@ -159,7 +159,7 @@ public class TestReplicationHFileCleaner { fs.createNewFile(deletablefile); assertTrue("Test file not created!", fs.exists(deletablefile)); - List files = new ArrayList(2); + List files = new ArrayList<>(2); FileStatus f = new FileStatus(); f.setPath(deletablefile); files.add(f); @@ -168,7 +168,7 @@ public class TestReplicationHFileCleaner { files.add(f); List> hfiles = new ArrayList<>(1); - hfiles.add(new Pair(null, notDeletablefile)); + hfiles.add(new Pair<>(null, notDeletablefile)); // 2. 
Add one file to hfile-refs queue rq.addHFileRefs(peerId, hfiles); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 7e6691df488..ce8b0c6e817 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -462,7 +462,7 @@ public class MasterProcedureTestingUtility { public void addProcId(long procId) { if (procsToAbort == null) { - procsToAbort = new TreeSet(); + procsToAbort = new TreeSet<>(); } procsToAbort.add(procId); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java index df431a539d1..6d88502ef8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java @@ -84,8 +84,8 @@ public class TestMasterProcedureSchedulerConcurrency { assertEquals(opsCount.get(), queue.size()); final Thread[] threads = new Thread[NUM_TABLES * 2]; - final HashSet concurrentTables = new HashSet(); - final ArrayList failures = new ArrayList(); + final HashSet concurrentTables = new HashSet<>(); + final ArrayList failures = new ArrayList<>(); final AtomicInteger concurrentCount = new AtomicInteger(0); for (int i = 0; i < threads.length; ++i) { threads[i] = new Thread() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java index 51aff6d4dc8..76d4585f106 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java @@ -199,7 +199,7 @@ public class TestSnapshotFileCache { class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { public Collection filesUnderSnapshot(final Path snapshotDir) throws IOException { - Collection files = new HashSet(); + Collection files = new HashSet<>(); files.addAll(SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotDir)); return files; } @@ -223,7 +223,7 @@ public class TestSnapshotFileCache { private void createAndTestSnapshot(final SnapshotFileCache cache, final SnapshotMock.SnapshotBuilder builder, final boolean tmp, final boolean removeOnExit) throws IOException { - List files = new ArrayList(); + List files = new ArrayList<>(); for (int i = 0; i < 3; ++i) { for (Path filePath: builder.addRegion()) { String fileName = filePath.getName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java index 30bea8cdee1..fba250da9ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java @@ -116,7 +116,7 @@ public class TestSnapshotHFileCleaner { class 
SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { public Collection filesUnderSnapshot(final Path snapshotDir) throws IOException { - Collection files = new HashSet(); + Collection files = new HashSet<>(); files.addAll(SnapshotReferenceUtil.getHFileNames(TEST_UTIL.getConfiguration(), fs, snapshotDir)); return files; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index b73b943ce89..83936aa637c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -624,7 +624,7 @@ public class TestMobCompactor { // the ref name is the new file Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, hcd1.getNameAsString()); - List paths = new ArrayList(); + List paths = new ArrayList<>(); if (fs.exists(mobFamilyPath)) { FileStatus[] files = fs.listStatus(mobFamilyPath); for (FileStatus file : files) { @@ -1015,7 +1015,7 @@ public class TestMobCompactor { private static ExecutorService createThreadPool(Configuration conf) { int maxThreads = 10; long keepAliveTime = 60; - final SynchronousQueue queue = new SynchronousQueue(); + final SynchronousQueue queue = new SynchronousQueue<>(); ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, queue, Threads.newDaemonThreadFactory("MobFileCompactionChore"), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 3aaf0e4c102..290e6f48956 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -715,7 +715,7 @@ public class TestPartitionedMobCompactor { @Override protected List performCompaction(PartitionedMobCompactionRequest request) throws IOException { - List delFilePaths = new ArrayList(); + List delFilePaths = new ArrayList<>(); for (CompactionDelPartition delPartition: request.getDelPartitions()) { for (Path p : delPartition.listDelFiles()) { delFilePaths.add(p); @@ -848,7 +848,7 @@ public class TestPartitionedMobCompactor { * @return the cell size */ private int countDelCellsInDelFiles(List paths) throws IOException { - List sfs = new ArrayList(); + List sfs = new ArrayList<>(); int size = 0; for(Path path : paths) { StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE); @@ -878,7 +878,7 @@ public class TestPartitionedMobCompactor { private static ExecutorService createThreadPool() { int maxThreads = 10; long keepAliveTime = 60; - final SynchronousQueue queue = new SynchronousQueue(); + final SynchronousQueue queue = new SynchronousQueue<>(); ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, queue, Threads.newDaemonThreadFactory("MobFileCompactionChore"), new RejectedExecutionHandler() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java index 91279b6ceb1..e71318b65a8 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java @@ -82,7 +82,7 @@ public class SimpleMasterProcedureManager extends MasterProcedureManager { ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance()); List serverNames = master.getServerManager().getOnlineServersList(); - List servers = new ArrayList(); + List servers = new ArrayList<>(); for (ServerName sn : serverNames) { servers.add(sn.toString()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index 7620bbbecf8..58efa87be13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -119,15 +119,15 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { private final ExecutorCompletionService taskPool; private final ThreadPoolExecutor executor; private volatile boolean aborted; - private final List> futures = new ArrayList>(); + private final List> futures = new ArrayList<>(); private final String name; public SimpleSubprocedurePool(String name, Configuration conf) { this.name = name; executor = new ThreadPoolExecutor(1, 1, 500, TimeUnit.SECONDS, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs(" + name + ")-procedure-pool")); - taskPool = new ExecutorCompletionService(executor); + taskPool = new ExecutorCompletionService<>(executor); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java index c424b6d449d..fa934d90821 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java @@ -86,7 +86,7 @@ public class TestProcedure { @Test(timeout = 60000) public void testSingleMember() throws Exception { // The member - List members = new ArrayList(); + List members = new ArrayList<>(); members.add("member"); LatchedProcedure proc = new LatchedProcedure(coord, new ForeignExceptionDispatcher(), 100, Integer.MAX_VALUE, "op", null, members); @@ -130,7 +130,7 @@ public class TestProcedure { @Test(timeout = 60000) public void testMultipleMember() throws Exception { // 2 members - List members = new ArrayList(); + List members = new ArrayList<>(); members.add("member1"); members.add("member2"); @@ -181,7 +181,7 @@ public class TestProcedure { @Test(timeout = 60000) public void testErrorPropagation() throws Exception { - List members = new ArrayList(); + List members = new ArrayList<>(); members.add("member"); Procedure proc = new Procedure(coord, new ForeignExceptionDispatcher(), 100, Integer.MAX_VALUE, "op", null, members); @@ -206,7 +206,7 @@ public class TestProcedure { @Test(timeout = 60000) public void testBarrieredErrorPropagation() throws Exception { - List members = new ArrayList(); + List members = new ArrayList<>(); members.add("member"); LatchedProcedure proc = new LatchedProcedure(coord, new ForeignExceptionDispatcher(), 100, Integer.MAX_VALUE, "op", null, members); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java 
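The SimpleRSProcedureManager hunk above applies the same change to concurrency utilities: the work queue handed to ThreadPoolExecutor and the completion service both drop their explicit type arguments. A runnable sketch using only JDK classes (pool sizes and the submitted task are arbitrary):

    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class DiamondWithExecutors {
        public static void main(String[] args) throws Exception {
            // The queue's element type (Runnable) is inferred from the
            // ThreadPoolExecutor constructor parameter it is passed to.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(
                1, 1, 500, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

            // The completion service's result type (String) is inferred from
            // the declared variable type.
            ExecutorCompletionService<String> taskPool =
                new ExecutorCompletionService<>(executor);

            taskPool.submit(() -> "done");
            System.out.println(taskPool.take().get());
            executor.shutdown();
        }
    }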
b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java index b52a8d6b1e2..2f0b5b90eae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureManager.java @@ -64,7 +64,7 @@ public class TestProcedureManager { Admin admin = util.getAdmin(); byte[] result = admin.execProcedureWithRet(SimpleMasterProcedureManager.SIMPLE_SIGNATURE, - "mytest", new HashMap()); + "mytest", new HashMap<>()); assertArrayEquals("Incorrect return data from execProcedure", SimpleMasterProcedureManager.SIMPLE_DATA.getBytes(), result); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java index 211e9e61bd6..9a77ce5a7d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java @@ -141,20 +141,19 @@ public class TestZKProcedure { // build and start members // NOTE: There is a single subprocedure builder for all members here. SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class); - List> procMembers = new ArrayList>( - members.length); + List> procMembers = new ArrayList<>(members.length); // start each member for (String member : members) { ZooKeeperWatcher watcher = newZooKeeperWatcher(); ZKProcedureMemberRpcs comms = new ZKProcedureMemberRpcs(watcher, opDescription); ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE); ProcedureMember procMember = new ProcedureMember(comms, pool2, subprocFactory); - procMembers.add(new Pair(procMember, comms)); + procMembers.add(new Pair<>(procMember, comms)); comms.start(member, procMember); } // setup mock member subprocedures - final List subprocs = new ArrayList(); + final List subprocs = new ArrayList<>(); for (int i = 0; i < procMembers.size(); i++) { ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher(); Subprocedure commit = Mockito @@ -216,19 +215,18 @@ public class TestZKProcedure { // start a member for each node SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class); - List> members = new ArrayList>( - expected.size()); + List> members = new ArrayList<>(expected.size()); for (String member : expected) { ZooKeeperWatcher watcher = newZooKeeperWatcher(); ZKProcedureMemberRpcs controller = new ZKProcedureMemberRpcs(watcher, opDescription); ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE); ProcedureMember mem = new ProcedureMember(controller, pool2, subprocFactory); - members.add(new Pair(mem, controller)); + members.add(new Pair<>(mem, controller)); controller.start(member, mem); } // setup mock subprocedures - final List cohortTasks = new ArrayList(); + final List cohortTasks = new ArrayList<>(); final int[] elem = new int[1]; for (int i = 0; i < members.size(); i++) { ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java index 5b058b3c03a..d864db2993d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java @@ -180,7 +180,7 @@ public class TestZKProcedureControllers { CountDownLatch prepared = new CountDownLatch(expected.size()); CountDownLatch committed = new CountDownLatch(expected.size()); - ArrayList dataFromMembers = new ArrayList(); + ArrayList dataFromMembers = new ArrayList<>(); // mock out coordinator so we can keep track of zk progress ProcedureCoordinator coordinator = setupMockCoordinator(operationName, @@ -256,7 +256,7 @@ public class TestZKProcedureControllers { final CountDownLatch prepared = new CountDownLatch(expected.size()); final CountDownLatch committed = new CountDownLatch(expected.size()); - ArrayList dataFromMembers = new ArrayList(); + ArrayList dataFromMembers = new ArrayList<>(); // mock out coordinator so we can keep track of zk progress ProcedureCoordinator coordinator = setupMockCoordinator(operationName, @@ -403,14 +403,13 @@ public class TestZKProcedureControllers { // make a cohort controller for each expected node - List cohortControllers = new ArrayList(); + List cohortControllers = new ArrayList<>(); for (String nodeName : expected) { ZKProcedureMemberRpcs cc = new ZKProcedureMemberRpcs(watcher, operationName); cc.start(nodeName, member); cohortControllers.add(cc); } - return new Pair>( - controller, cohortControllers); + return new Pair<>(controller, cohortControllers); } }; @@ -427,7 +426,7 @@ public class TestZKProcedureControllers { ProcedureMember member, List expected) throws Exception { // make a cohort controller for each expected node - List cohortControllers = new ArrayList(); + List cohortControllers = new ArrayList<>(); for (String nodeName : expected) { ZKProcedureMemberRpcs cc = new ZKProcedureMemberRpcs(watcher, operationName); cc.start(nodeName, member); @@ -439,8 +438,7 @@ public class TestZKProcedureControllers { watcher, operationName, CONTROLLER_NODE_NAME); controller.start(coordinator); - return new Pair>( - controller, cohortControllers); + return new Pair<>(controller, cohortControllers); } }; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java index 057a35d4d47..1e3a0c248b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java @@ -41,19 +41,19 @@ public class TestReplicationProtobuf { */ @Test public void testGetCellScanner() throws IOException { - List a = new ArrayList(); + List a = new ArrayList<>(); KeyValue akv = new KeyValue(Bytes.toBytes("a"), -1L); a.add(akv); // Add a few just to make it less regular. 
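The TestZKProcedureControllers hunk above also uses the diamond in return statements, where the type arguments are inferred from the method's declared return type. A small sketch with a stand-in Pair (again an illustration, not HBase's Pair class):

    import java.util.ArrayList;
    import java.util.List;

    public class DiamondInReturn {
        static class Pair<A, B> {
            final A first;
            final B second;
            Pair(A first, B second) { this.first = first; this.second = second; }
        }

        // Before: return new Pair<String, List<Integer>>(name, values);
        // After: the compiler infers the arguments from the return type.
        static Pair<String, List<Integer>> controllers(String name) {
            List<Integer> values = new ArrayList<>();
            values.add(1);
            return new Pair<>(name, values);
        }

        public static void main(String[] args) {
            System.out.println(controllers("coordinator").first);
        }
    }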
a.add(new KeyValue(Bytes.toBytes("aa"), -1L)); a.add(new KeyValue(Bytes.toBytes("aaa"), -1L)); - List b = new ArrayList(); + List b = new ArrayList<>(); KeyValue bkv = new KeyValue(Bytes.toBytes("b"), -1L); a.add(bkv); - List c = new ArrayList(); + List c = new ArrayList<>(); KeyValue ckv = new KeyValue(Bytes.toBytes("c"), -1L); c.add(ckv); - List> all = new ArrayList>(); + List> all = new ArrayList<>(); all.add(a); all.add(b); all.add(c); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java index 4dce69647dd..7229c409eef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java @@ -39,7 +39,7 @@ public class AbstractTestDateTieredCompactionPolicy extends TestCompactionPolicy EnvironmentEdgeManager.injectEdge(timeMachine); // Has to be > 0 and < now. timeMachine.setValue(1); - ArrayList ageInDisk = new ArrayList(); + ArrayList ageInDisk = new ArrayList<>(); for (int i = 0; i < sizes.length; i++) { ageInDisk.add(0L); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index bbcdce440ef..dd202591f83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -117,7 +117,7 @@ public class DataBlockEncodingTool { private static int benchmarkNTimes = DEFAULT_BENCHMARK_N_TIMES; private static int benchmarkNOmit = DEFAULT_BENCHMARK_N_OMIT; - private List codecs = new ArrayList(); + private List codecs = new ArrayList<>(); private long totalPrefixLength = 0; private long totalKeyLength = 0; private long totalValueLength = 0; @@ -236,8 +236,7 @@ public class DataBlockEncodingTool { KeyValue currentKv; scanner.seek(KeyValue.LOWESTKEY); - List> codecIterators = - new ArrayList>(); + List> codecIterators = new ArrayList<>(); for(EncodedDataBlock codec : codecs) { codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum))); } @@ -326,7 +325,7 @@ public class DataBlockEncodingTool { int totalSize = 0; // decompression time - List durations = new ArrayList(); + List durations = new ArrayList<>(); for (int itTime = 0; itTime < benchmarkNTimes; ++itTime) { totalSize = 0; @@ -352,7 +351,7 @@ public class DataBlockEncodingTool { prevTotalSize = totalSize; } - List encodingDurations = new ArrayList(); + List encodingDurations = new ArrayList<>(); for (int itTime = 0; itTime < benchmarkNTimes; ++itTime) { final long startTime = System.nanoTime(); codec.encodeData(); @@ -390,7 +389,7 @@ public class DataBlockEncodingTool { System.out.println(name + ":"); // compress it - List compressDurations = new ArrayList(); + List compressDurations = new ArrayList<>(); ByteArrayOutputStream compressedStream = new ByteArrayOutputStream(); CompressionOutputStream compressingStream = algorithm.createPlainCompressionStream(compressedStream, compressor); @@ -421,7 +420,7 @@ public class DataBlockEncodingTool { byte[] compBuffer = compressedStream.toByteArray(); // uncompress it several times and measure performance - List durations = new ArrayList(); + List durations = new 
ArrayList<>(); for (int itTime = 0; itTime < benchmarkNTimes; ++itTime) { final long startTime = System.nanoTime(); byte[] newBuf = new byte[length + 1]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index 9638e69e68b..eb77c287049 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -56,7 +56,7 @@ public class EncodedSeekPerformanceTest { } private List prepareListOfTestSeeks(Path path) throws IOException { - List allKeyValues = new ArrayList(); + List allKeyValues = new ArrayList<>(); // read all of the key values StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(), @@ -74,7 +74,7 @@ public class EncodedSeekPerformanceTest { storeFile.closeReader(cacheConf.shouldEvictOnClose()); // pick seeks by random - List seeks = new ArrayList(); + List seeks = new ArrayList<>(); for (int i = 0; i < numberOfSeeks; ++i) { Cell keyValue = allKeyValues.get( randomizer.nextInt(allKeyValues.size())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java index a4e7f9bd225..59aded8139a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java @@ -39,7 +39,7 @@ public class KeyValueScanFixture extends CollectionBackedScanner { } public static List scanFixture(KeyValue[] ... kvArrays) { - ArrayList scanners = new ArrayList(); + ArrayList scanners = new ArrayList<>(); for (KeyValue [] kvs : kvArrays) { scanners.add(new KeyValueScanFixture(CellComparator.COMPARATOR, kvs)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java index 5b4b0c1db48..11694344712 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java @@ -36,7 +36,7 @@ public class MockStoreFile extends StoreFile { boolean isRef = false; long ageInDisk; long sequenceid; - private Map metadata = new TreeMap(Bytes.BYTES_COMPARATOR); + private Map metadata = new TreeMap<>(Bytes.BYTES_COMPARATOR); byte[] splitPoint = null; TimeRangeTracker timeRangeTracker; long entryCount; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java index aa2bc1a937d..036c11c8cd0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequ * ${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMERegionServer start. 
 */
public class OOMERegionServer extends HRegionServer {
-  private List retainer = new ArrayList();
+  private List retainer = new ArrayList<>();
  public OOMERegionServer(HBaseConfiguration conf, CoordinatedStateManager cp) throws IOException, InterruptedException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
index d2e78b77dcf..cfae7cb2ab2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
@@ -144,13 +144,13 @@ public class RegionAsTable implements Table {
    @Override
    public Result next() throws IOException {
-      List cells = new ArrayList();
+      List cells = new ArrayList<>();
      return regionScanner.next(cells)? Result.create(cells): null;
    }
    @Override
    public Result[] next(int nbRows) throws IOException {
-      List results = new ArrayList(nbRows);
+      List results = new ArrayList<>(nbRows);
      for (int i = 0; i < nbRows; i++) {
        Result result = next();
        if (result == null) break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index d00eef13355..ef3ce06efb7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -493,7 +493,7 @@ public class TestAtomicOperation {
            }
          }
          long ts = timeStamps.incrementAndGet();
-          List mrm = new ArrayList();
+          List mrm = new ArrayList<>();
          if (op) {
            Put p = new Put(row2, ts);
            p.addColumn(fam1, qual1, value1);
@@ -518,7 +518,7 @@ public class TestAtomicOperation {
          // check: should always see exactly one column
          Scan s = new Scan(row);
          RegionScanner rs = region.getScanner(s);
-          List r = new ArrayList();
+          List r = new ArrayList<>();
          while (rs.next(r)) ;
          rs.close();
@@ -610,7 +610,7 @@ public class TestAtomicOperation {
    ctx.stop();
    Scan s = new Scan();
    RegionScanner scanner = region.getScanner(s);
-    List results = new ArrayList();
+    List results = new ArrayList<>();
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
    scanner.next(results, scannerContext);
    for (Cell keyValue : results) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
index edd7847422c..59c256af18f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
@@ -389,7 +389,7 @@ public class TestBlocksRead {
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    RegionScanner rs = region.getScanner(scan);
-    List result = new ArrayList(2);
+    List result = new ArrayList<>(2);
    rs.next(result);
    assertEquals(2 * BLOOM_TYPE.length, result.size());
    rs.close();
@@ -402,7 +402,7 @@ public class TestBlocksRead {
    blocksStart = blocksEnd;
    scan.setCacheBlocks(true);
    rs = region.getScanner(scan);
-    result = new ArrayList(2);
+    result = new ArrayList<>(2);
    rs.next(result);
    assertEquals(2 * BLOOM_TYPE.length, result.size());
    rs.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
index b2ba97c9176..497fd0373ea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
@@ -101,7 +101,7 @@ public class TestBlocksScanned extends HBaseTestCase {
    scan.setMaxVersions(1);
    InternalScanner s = r.getScanner(scan);
-    List results = new ArrayList();
+    List results = new ArrayList<>();
    while (s.next(results)) ;
    s.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 4c025c49fa4..418aadf562e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -106,7 +106,7 @@ public class TestBulkLoad {
    byte[] familyName = familyPaths.get(0).getFirst();
    String storeFileName = familyPaths.get(0).getSecond();
    storeFileName = (new Path(storeFileName)).getName();
-    List storeFileNames = new ArrayList();
+    List storeFileNames = new ArrayList<>();
    storeFileNames.add(storeFileName);
    when(log.append(any(HRegionInfo.class), any(WALKey.class),
        argThat(bulkLogWalEdit(WALEdit.BULK_LOAD, tableName.toBytes(),
@@ -129,8 +129,7 @@ public class TestBulkLoad {
  @Test
  public void bulkHLogShouldThrowNoErrorAndWriteMarkerWithBlankInput() throws IOException {
-    testRegionWithFamilies(family1).bulkLoadHFiles(new ArrayList>(),
-        false, null);
+    testRegionWithFamilies(family1).bulkLoadHFiles(new ArrayList<>(),false, null);
  }
  @Test
@@ -219,7 +218,7 @@ public class TestBulkLoad {
  }
  private Pair withMissingHFileForFamily(byte[] family) {
-    return new Pair(family, getNotExistFilePath());
+    return new Pair<>(family, getNotExistFilePath());
  }
  private String getNotExistFilePath() {
@@ -230,7 +229,7 @@ public class TestBulkLoad {
  private Pair withInvalidColumnFamilyButProperHFileLocation(byte[] family) throws IOException {
    createHFileForFamilies(family);
-    return new Pair(new byte[]{0x00, 0x01, 0x02}, getNotExistFilePath());
+    return new Pair<>(new byte[]{0x00, 0x01, 0x02}, getNotExistFilePath());
  }
@@ -258,13 +257,13 @@ public class TestBulkLoad {
  }
  private List> getBlankFamilyPaths(){
-    return new ArrayList>();
+    return new ArrayList<>();
  }
  private List> withFamilyPathsFor(byte[]... families) throws IOException {
    List> familyPaths = getBlankFamilyPaths();
    for (byte[] family : families) {
-      familyPaths.add(new Pair(family, createHFileForFamilies(family)));
+      familyPaths.add(new Pair<>(family, createHFileForFamilies(family)));
    }
    return familyPaths;
  }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 4a73eda4f14..9fed202932c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -137,7 +137,7 @@ public class TestCacheOnWriteInSchema {
  @Parameters
  public static Collection getParameters() {
-    List cowTypes = new ArrayList();
+    List cowTypes = new ArrayList<>();
    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
      cowTypes.add(new Object[] { cowType });
    }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
index c59f64bbec5..5cfa17d3a7e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
@@ -90,14 +90,14 @@ public class TestColumnSeeking {
    double majorPercentage = 0.2;
    double putPercentage = 0.2;
-    HashMap allKVMap = new HashMap();
+    HashMap allKVMap = new HashMap<>();
    HashMap[] kvMaps = new HashMap[numberOfTests];
    ArrayList[] columnLists = new ArrayList[numberOfTests];
    for (int i = 0; i < numberOfTests; i++) {
-      kvMaps[i] = new HashMap();
-      columnLists[i] = new ArrayList();
+      kvMaps[i] = new HashMap<>();
+      columnLists[i] = new ArrayList<>();
      for (String column : allColumns) {
        if (Math.random() < selectPercent) {
          columnLists[i].add(column);
@@ -162,7 +162,7 @@ public class TestColumnSeeking {
    }
    InternalScanner scanner = region.getScanner(scan);
-    List results = new ArrayList();
+    List results = new ArrayList<>();
    while (scanner.next(results)) ;
    assertEquals(kvSet.size(), results.size());
@@ -201,15 +201,15 @@ public class TestColumnSeeking {
    double majorPercentage = 0.2;
    double putPercentage = 0.2;
-    HashMap allKVMap = new HashMap();
+    HashMap allKVMap = new HashMap<>();
    HashMap[] kvMaps = new HashMap[numberOfTests];
    ArrayList[] columnLists = new ArrayList[numberOfTests];
    String valueString = "Value";
    for (int i = 0; i < numberOfTests; i++) {
-      kvMaps[i] = new HashMap();
-      columnLists[i] = new ArrayList();
+      kvMaps[i] = new HashMap<>();
+      columnLists[i] = new ArrayList<>();
      for (String column : allColumns) {
        if (Math.random() < selectPercent) {
          columnLists[i].add(column);
@@ -274,7 +274,7 @@ public class TestColumnSeeking {
    }
    InternalScanner scanner = region.getScanner(scan);
-    List results = new ArrayList();
+    List results = new ArrayList<>();
    while (scanner.next(results)) ;
    assertEquals(kvSet.size(), results.size());
@@ -285,7 +285,7 @@ public class TestColumnSeeking {
  }
  List generateRandomWords(int numberOfWords, String suffix) {
-    Set wordSet = new HashSet();
+    Set wordSet = new HashSet<>();
    for (int i = 0; i < numberOfWords; i++) {
      int lengthOfWords = (int) (Math.random() * 5) + 1;
      char[] wordChar = new char[lengthOfWords];
@@ -300,7 +300,7 @@ public class TestColumnSeeking {
      }
      wordSet.add(word);
    }
-    List wordList = new ArrayList(wordSet);
+    List wordList = new
ArrayList<>(wordSet); return wordList; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 65ad956af80..63bbe653239 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -191,7 +191,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { InternalScanner scanner = new StoreScanner(new Scan( Bytes.toBytes(startRowId)), scanInfo, scanType, null, memstore.getScanners(0)); - List results = new ArrayList(); + List results = new ArrayList<>(); for (int i = 0; scanner.next(results); i++) { int rowId = startRowId + i; Cell left = results.get(0); @@ -199,7 +199,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { assertTrue("Row name", CellComparator.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0); assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); - List row = new ArrayList(); + List row = new ArrayList<>(); for (Cell kv : results) { row.add(kv); } @@ -255,7 +255,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testUpsertMemstoreSize() throws Exception { MemstoreSize oldSize = memstore.size(); - List l = new ArrayList(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v"); KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v"); @@ -313,7 +313,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { t = runSnapshot(memstore, true); // test the case that the timeOfOldestEdit is updated after a KV upsert - List l = new ArrayList(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); kv1.setSequenceId(100); l.add(kv1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index bc51c413166..1bf6ea794e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -370,8 +370,7 @@ public class TestCompaction { // setup a region/store with some files int numStores = r.getStores().size(); - List> requests = - new ArrayList>(numStores); + List> requests = new ArrayList<>(numStores); CountDownLatch latch = new CountDownLatch(numStores); // create some store files and setup requests for each store on which we want to do a // compaction @@ -379,8 +378,7 @@ public class TestCompaction { createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); - requests - .add(new Pair(new TrackableCompactionRequest(latch), store)); + requests.add(new Pair<>(new TrackableCompactionRequest(latch), store)); } thread.requestCompaction(r, "test mulitple custom comapctions", Store.PRIORITY_USER, @@ -393,8 +391,8 @@ public class TestCompaction { } private class StoreMockMaker extends StatefulStoreMockMaker { - public ArrayList compacting = new ArrayList(); - public ArrayList notCompacting = new ArrayList(); + public ArrayList compacting = new ArrayList<>(); + public ArrayList notCompacting = new ArrayList<>(); private ArrayList 
results; public StoreMockMaker(ArrayList results) { @@ -410,7 +408,7 @@ public class TestCompaction { @Override public List preSelect(List filesCompacting) { - return new ArrayList(); + return new ArrayList<>(); } @Override @@ -425,13 +423,13 @@ public class TestCompaction { public List compact(ThroughputController throughputController, User user) throws IOException { finishCompaction(this.selectedFiles); - return new ArrayList(); + return new ArrayList<>(); } } @Override public synchronized CompactionContext selectCompaction() { - CompactionContext ctx = new TestCompactionContext(new ArrayList(notCompacting)); + CompactionContext ctx = new TestCompactionContext(new ArrayList<>(notCompacting)); compacting.addAll(notCompacting); notCompacting.clear(); try { @@ -484,18 +482,18 @@ public class TestCompaction { } catch (InterruptedException e) { Assume.assumeNoException(e); } - return new ArrayList(); + return new ArrayList<>(); } @Override public List preSelect(List filesCompacting) { - return new ArrayList(); + return new ArrayList<>(); } @Override public boolean select(List f, boolean i, boolean m, boolean e) throws IOException { - this.request = new CompactionRequest(new ArrayList()); + this.request = new CompactionRequest(new ArrayList<>()); return true; } } @@ -568,7 +566,7 @@ public class TestCompaction { }); // Set up store mocks for 2 "real" stores and the one we use for blocking CST. - ArrayList results = new ArrayList(); + ArrayList results = new ArrayList<>(); StoreMockMaker sm = new StoreMockMaker(results), sm2 = new StoreMockMaker(results); Store store = sm.createStoreMock("store1"), store2 = sm2.createStoreMock("store2"); BlockingStoreMockMaker blocker = new BlockingStoreMockMaker(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java index 7c7bfd34ed3..8e85730a524 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java @@ -91,7 +91,7 @@ public class TestCompactionArchiveConcurrentClose { HRegionInfo info = new HRegionInfo(tableName, null, null, false); Region region = initHRegion(htd, info); RegionServerServices rss = mock(RegionServerServices.class); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(region); when(rss.getOnlineRegions()).thenReturn(regions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java index cf9925837ff..89b23689793 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java @@ -98,7 +98,7 @@ public class TestCompactionArchiveIOException { HRegionInfo info = new HRegionInfo(tableName, null, null, false); final HRegion region = initHRegion(htd, info); RegionServerServices rss = mock(RegionServerServices.class); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(region); when(rss.getOnlineRegions()).thenReturn(regions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java index 24b3667430f..7154511e1f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java @@ -136,7 +136,7 @@ public class TestCompactionPolicy { } ArrayList toArrayList(long... numbers) { - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); for (long i : numbers) { result.add(i); } @@ -144,7 +144,7 @@ public class TestCompactionPolicy { } List sfCreate(long... sizes) throws IOException { - ArrayList ageInDisk = new ArrayList(); + ArrayList ageInDisk = new ArrayList<>(); for (int i = 0; i < sizes.length; i++) { ageInDisk.add(0L); } @@ -156,7 +156,7 @@ public class TestCompactionPolicy { } List sfCreate(boolean isReference, long... sizes) throws IOException { - ArrayList ageInDisk = new ArrayList(sizes.length); + ArrayList ageInDisk = new ArrayList<>(sizes.length); for (int i = 0; i < sizes.length; i++) { ageInDisk.add(0L); } @@ -196,8 +196,8 @@ public class TestCompactionPolicy { // Test Default compactions CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction( - candidates, new ArrayList(), false, isOffPeak, forcemajor); - List actual = new ArrayList(result.getFiles()); + candidates, new ArrayList<>(), false, isOffPeak, forcemajor); + List actual = new ArrayList<>(result.getFiles()); if (isOffPeak && !forcemajor) { Assert.assertTrue(result.isOffPeak()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 0e6fb54264a..8c5532795c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -220,7 +220,7 @@ public class TestCompactionState { private static void loadData(final Table ht, final byte[][] families, final int rows, final int flushes) throws IOException { - List puts = new ArrayList(rows); + List puts = new ArrayList<>(rows); byte[] qualifier = Bytes.toBytes("val"); for (int i = 0; i < flushes; i++) { for (int k = 0; k < rows; k++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index 174843e8b6d..dfea761594d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -142,7 +142,7 @@ public class TestCompoundBloomFilter { } private List createSortedKeyValues(Random rand, int n) { - List kvList = new ArrayList(n); + List kvList = new ArrayList<>(n); for (int i = 0; i < n; ++i) kvList.add(RandomKeyValueUtil.randomKeyValue(rand)); Collections.sort(kvList, CellComparator.COMPARATOR); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java index 68b0ba3ee8d..cec5fc710fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java @@ -67,7 +67,7 @@ public class TestCorruptedRegionStoreFile { @Rule public TestTableName TEST_TABLE = new TestTableName(); - private final ArrayList storeFiles = new ArrayList(); + private final ArrayList storeFiles = new ArrayList<>(); private Path tableDir; private int rowCount; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java index 4fa18b80a2b..3c41fc5efd8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java @@ -119,7 +119,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy { compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3); // empty case - compactEquals(new ArrayList() /* empty */); + compactEquals(new ArrayList<>() /* empty */); // empty case (because all files are too big) compactEquals(sfCreate(tooBig, tooBig) /* empty */); } @@ -175,7 +175,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy { // Test Default compactions CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine .getCompactionPolicy()).selectCompaction(candidates, - new ArrayList(), false, false, false); + new ArrayList<>(), false, false, false); Assert.assertTrue(result.getFiles().isEmpty()); store.setScanInfo(oldScanInfo); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 43c185a729b..e6d3147c0c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -150,7 +150,7 @@ public class TestDefaultMemStore { int rowCount = addRows(this.memstore); List memstorescanners = this.memstore.getScanners(0); Scan scan = new Scan(); - List result = new ArrayList(); + List result = new ArrayList<>(); Configuration conf = HBaseConfiguration.create(); ScanInfo scanInfo = new ScanInfo(conf, null, 0, 1, HConstants.LATEST_TIMESTAMP, KeepDeletedCells.FALSE, 0, @@ -502,7 +502,7 @@ public class TestDefaultMemStore { int NUM_THREADS = 8; ReadOwnWritesTester threads[] = new ReadOwnWritesTester[NUM_THREADS]; - AtomicReference caught = new AtomicReference(); + AtomicReference caught = new AtomicReference<>(); for (int i = 0; i < NUM_THREADS; i++) { threads[i] = new ReadOwnWritesTester(i, memstore, mvcc, caught, this.startSeqNum); @@ -589,7 +589,7 @@ public class TestDefaultMemStore { try (InternalScanner scanner = new StoreScanner(new Scan( Bytes.toBytes(startRowId)), scanInfo, scanType, null, memstore.getScanners(0))) { - List results = new ArrayList(); + List results = new ArrayList<>(); for (int i = 0; scanner.next(results); i++) { int rowId = startRowId + i; Cell left = results.get(0); @@ -598,7 +598,7 @@ public class TestDefaultMemStore { "Row name", CellComparator.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0); assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); - List row = new ArrayList(); + List row = new ArrayList<>(); for (Cell kv : results) { row.add(kv); } @@ -660,7 +660,7 @@ public class TestDefaultMemStore { KeyValue del2 = 
new KeyValue(row, fam, qf1, ts2, KeyValue.Type.Delete, val); memstore.add(del2, null); - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(put3); expected.add(del2); expected.add(put2); @@ -696,7 +696,7 @@ public class TestDefaultMemStore { new KeyValue(row, fam, qf1, ts2, KeyValue.Type.DeleteColumn, val); memstore.add(del2, null); - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(put3); expected.add(del2); expected.add(put2); @@ -733,7 +733,7 @@ public class TestDefaultMemStore { new KeyValue(row, fam, null, ts, KeyValue.Type.DeleteFamily, val); memstore.add(del, null); - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(del); expected.add(put1); expected.add(put2); @@ -822,7 +822,7 @@ public class TestDefaultMemStore { memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR); MemstoreSize oldSize = memstore.size(); - List l = new ArrayList(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v"); KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v"); @@ -880,7 +880,7 @@ public class TestDefaultMemStore { t = runSnapshot(memstore); // test the case that the timeOfOldestEdit is updated after a KV upsert - List l = new ArrayList(); + List l = new ArrayList<>(); KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); kv1.setSequenceId(100); l.add(kv1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java index 3eb86be18bb..b34c307a9ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java @@ -233,7 +233,7 @@ public class TestEncryptionKeyRotation { } private static List findStorefilePaths(TableName tableName) throws Exception { - List paths = new ArrayList(); + List paths = new ArrayList<>(); for (Region region: TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) { for (Store store: region.getStores()) { @@ -246,7 +246,7 @@ public class TestEncryptionKeyRotation { } private static List findCompactedStorefilePaths(TableName tableName) throws Exception { - List paths = new ArrayList(); + List paths = new ArrayList<>(); for (Region region: TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) { for (Store store : region.getStores()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java index 760bdacc884..2b0ab7bc89c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java @@ -52,7 +52,7 @@ public class TestEncryptionRandomKeying { private static HTableDescriptor htd; private static List findStorefilePaths(TableName tableName) throws Exception { - List paths = new ArrayList(); + List paths = new ArrayList<>(); for (Region region: TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) { for (Store store: region.getStores()) { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 9ed0c2a1690..10510365df1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -245,7 +245,7 @@ public class TestEndToEndSplitTransaction { verifyStartEndKeys(keys); //HTable.getRegionsInfo() - Set regions = new TreeSet(); + Set regions = new TreeSet<>(); for (HRegionLocation loc : rl.getAllRegionLocations()) { regions.add(loc.getRegionInfo()); } @@ -275,7 +275,7 @@ public class TestEndToEndSplitTransaction { i++; } - Pair keys = new Pair(startKeys, endKeys); + Pair keys = new Pair<>(startKeys, endKeys); verifyStartEndKeys(keys); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 9a49c5db2ca..9f0975d8b7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -233,8 +233,7 @@ public class TestFSErrorsExposed { } static class FaultyFileSystem extends FilterFileSystem { - List> inStreams = - new ArrayList>(); + List> inStreams = new ArrayList<>(); public FaultyFileSystem(FileSystem testFileSystem) { super(testFileSystem); @@ -244,7 +243,7 @@ public class TestFSErrorsExposed { public FSDataInputStream open(Path p, int bufferSize) throws IOException { FSDataInputStream orig = fs.open(p, bufferSize); FaultyInputStream faulty = new FaultyInputStream(orig); - inStreams.add(new SoftReference(faulty)); + inStreams.add(new SoftReference<>(faulty)); return faulty; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 7ee3f0bd8b8..570d2d8e16c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -101,7 +101,7 @@ public class TestGetClosestAtOrBefore { } InternalScanner s = mr.getScanner(new Scan()); try { - List keys = new ArrayList(); + List keys = new ArrayList<>(); while (s.next(keys)) { LOG.info(keys); keys.clear(); @@ -125,7 +125,7 @@ public class TestGetClosestAtOrBefore { Scan scan = new Scan(firstRowInC); s = mr.getScanner(scan); try { - List keys = new ArrayList(); + List keys = new ArrayList<>(); while (s.next(keys)) { mr.delete(new Delete(CellUtil.cloneRow(keys.get(0)))); keys.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 6877fcafc0f..b416c7d9e53 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -100,9 +100,8 @@ public class TestHMobStore { private Cell seekKey1; private Cell seekKey2; private Cell seekKey3; - private NavigableSet qualifiers = - new ConcurrentSkipListSet(Bytes.BYTES_COMPARATOR); - private List expected = new ArrayList(); + private 
NavigableSet qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); + private List expected = new ArrayList<>(); private long id = System.currentTimeMillis(); private Get get = new Get(row); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -222,7 +221,7 @@ public class TestHMobStore { scan.getFamilyMap().get(store.getFamily().getName()), 0); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparator.COMPARATOR); scanner.close(); @@ -267,7 +266,7 @@ public class TestHMobStore { scan.getFamilyMap().get(store.getFamily().getName()), 0); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparator.COMPARATOR); scanner.close(); @@ -312,7 +311,7 @@ public class TestHMobStore { scan.getFamilyMap().get(store.getFamily().getName()), 0); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparator.COMPARATOR); scanner.close(); @@ -357,7 +356,7 @@ public class TestHMobStore { scan.getFamilyMap().get(store.getFamily().getName()), 0); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparator.COMPARATOR); scanner.close(); @@ -409,7 +408,7 @@ public class TestHMobStore { scan.getFamilyMap().get(store.getFamily().getName()), 0); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparator.COMPARATOR); scanner.close(); @@ -525,7 +524,7 @@ public class TestHMobStore { scan.getFamilyMap().get(store.getFamily().getName()), 0); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); Collections.sort(results, CellComparator.COMPARATOR); scanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 35f1b7d5419..eac3c77b463 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -608,7 +608,7 @@ public class TestHRegion { // open the second scanner RegionScanner scanner2 = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); System.out.println("Smallest read point:" + region.getSmallestReadPoint()); @@ -657,7 +657,7 @@ public class TestHRegion { region.compact(true); scanner1.reseek(Bytes.toBytes("r2")); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner1.next(results); Cell keyValue = results.get(0); Assert.assertTrue(Bytes.compareTo(CellUtil.cloneRow(keyValue), Bytes.toBytes("r2")) == 0); @@ -694,7 +694,7 @@ public class TestHRegion { writer.close(); } MonitoredTask status = TaskMonitor.get().createStatus(method); - Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1); } @@ -746,7 +746,7 @@ public class TestHRegion { } long recoverSeqId = 1030; MonitoredTask status = TaskMonitor.get().createStatus(method); - Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Store store : 
region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1); } @@ -791,7 +791,7 @@ public class TestHRegion { FSDataOutputStream dos = fs.create(recoveredEdits); dos.close(); - Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId); } @@ -848,7 +848,7 @@ public class TestHRegion { } long recoverSeqId = 1030; - Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); MonitoredTask status = TaskMonitor.get().createStatus(method); for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1); @@ -893,7 +893,7 @@ public class TestHRegion { // this will create a region with 3 files assertEquals(3, region.getStore(family).getStorefilesCount()); - List storeFiles = new ArrayList(3); + List storeFiles = new ArrayList<>(3); for (StoreFile sf : region.getStore(family).getStorefiles()) { storeFiles.add(sf.getPath()); } @@ -1007,7 +1007,7 @@ public class TestHRegion { // this will create a region with 3 files from flush assertEquals(3, region.getStore(family).getStorefilesCount()); - List storeFiles = new ArrayList(3); + List storeFiles = new ArrayList<>(3); for (StoreFile sf : region.getStore(family).getStorefiles()) { storeFiles.add(sf.getPath().getName()); } @@ -1017,7 +1017,7 @@ public class TestHRegion { WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal), TEST_UTIL.getConfiguration()); try { - List flushDescriptors = new ArrayList(); + List flushDescriptors = new ArrayList<>(); long lastFlushSeqId = -1; while (true) { WAL.Entry entry = reader.next(); @@ -1422,7 +1422,7 @@ public class TestHRegion { InternalScanner scanner = buildScanner(keyPrefix, value, r); int count = 0; boolean more = false; - List results = new ArrayList(); + List results = new ArrayList<>(); do { more = scanner.next(results); if (results != null && !results.isEmpty()) @@ -1440,7 +1440,7 @@ public class TestHRegion { private int getNumberOfRows(String keyPrefix, String value, HRegion r) throws Exception { InternalScanner resultScanner = buildScanner(keyPrefix, value, r); int numberOfResults = 0; - List results = new ArrayList(); + List results = new ArrayList<>(); boolean more = false; do { more = resultScanner.next(results); @@ -1579,7 +1579,7 @@ public class TestHRegion { MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(CONF); - final AtomicReference retFromThread = new AtomicReference(); + final AtomicReference retFromThread = new AtomicReference<>(); final CountDownLatch startingPuts = new CountDownLatch(1); final CountDownLatch startingClose = new CountDownLatch(1); TestThread putter = new TestThread(ctx) { @@ -2112,14 +2112,13 @@ public class TestHRegion { // Setting up region this.region = initHRegion(tableName, method, CONF, fam1, fam2, fam3); try { - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); kvs.add(new KeyValue(row1, fam4, null, null)); // testing existing family byte[] family = fam2; try { - NavigableMap> deleteMap = new TreeMap>( - Bytes.BYTES_COMPARATOR); + NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); deleteMap.put(family, kvs); region.delete(deleteMap, Durability.SYNC_WAL); } catch (Exception e) { @@ -2130,8 +2129,7 @@ public class TestHRegion { boolean ok = 
false; family = fam4; try { - NavigableMap> deleteMap = new TreeMap>( - Bytes.BYTES_COMPARATOR); + NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); deleteMap.put(family, kvs); region.delete(deleteMap, Durability.SYNC_WAL); } catch (Exception e) { @@ -2361,7 +2359,7 @@ public class TestHRegion { Scan scan = new Scan(); scan.addFamily(fam1).addFamily(fam2); InternalScanner s = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); s.next(results); assertTrue(CellUtil.matchingRow(results.get(0), rowA)); @@ -2488,7 +2486,7 @@ public class TestHRegion { scan.addColumn(fam1, qual1); InternalScanner s = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); assertEquals(false, s.next(results)); assertEquals(1, results.size()); Cell kv = results.get(0); @@ -2514,13 +2512,12 @@ public class TestHRegion { this.region = initHRegion(tableName, method, CONF, fam1); try { // Building checkerList - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); kvs.add(new KeyValue(row1, fam1, col1, null)); kvs.add(new KeyValue(row1, fam1, col2, null)); kvs.add(new KeyValue(row1, fam1, col3, null)); - NavigableMap> deleteMap = new TreeMap>( - Bytes.BYTES_COMPARATOR); + NavigableMap> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); deleteMap.put(fam1, kvs); region.delete(deleteMap, Durability.SYNC_WAL); @@ -2811,22 +2808,22 @@ public class TestHRegion { List res = null; // Result 1 - List expected1 = new ArrayList(); + List expected1 = new ArrayList<>(); expected1.add(new KeyValue(row1, fam2, null, ts, KeyValue.Type.Put, null)); expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null)); - res = new ArrayList(); + res = new ArrayList<>(); is.next(res); for (int i = 0; i < res.size(); i++) { assertTrue(CellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i))); } // Result 2 - List expected2 = new ArrayList(); + List expected2 = new ArrayList<>(); expected2.add(new KeyValue(row2, fam2, null, ts, KeyValue.Type.Put, null)); expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null)); - res = new ArrayList(); + res = new ArrayList<>(); is.next(res); for (int i = 0; i < res.size(); i++) { assertTrue(CellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i))); @@ -2872,14 +2869,14 @@ public class TestHRegion { region.put(put); // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); Scan scan = new Scan(row1); scan.addColumn(fam1, qf1); scan.setMaxVersions(MAX_VERSIONS); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -2931,7 +2928,7 @@ public class TestHRegion { region.flush(true); // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -2941,7 +2938,7 @@ public class TestHRegion { scan.addColumn(fam1, qf1); scan.addColumn(fam1, qf2); scan.setMaxVersions(MAX_VERSIONS); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -3010,7 +3007,7 @@ public class TestHRegion { region.put(put); // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv14); expected.add(kv13); expected.add(kv12); @@ -3023,7 +3020,7 @@ public class TestHRegion { scan.addColumn(fam1, qf2); int 
versions = 3; scan.setMaxVersions(versions); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -3074,7 +3071,7 @@ public class TestHRegion { region.put(put); // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -3083,7 +3080,7 @@ public class TestHRegion { Scan scan = new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(MAX_VERSIONS); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -3134,7 +3131,7 @@ public class TestHRegion { region.flush(true); // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv13); expected.add(kv12); expected.add(kv23); @@ -3143,7 +3140,7 @@ public class TestHRegion { Scan scan = new Scan(row1); scan.addFamily(fam1); scan.setMaxVersions(MAX_VERSIONS); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -3198,7 +3195,7 @@ public class TestHRegion { scan.addColumn(family, col1); InternalScanner s = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); assertEquals(false, s.next(results)); assertEquals(0, results.size()); } finally { @@ -3258,7 +3255,7 @@ public class TestHRegion { region.put(put); // Expected - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(kv14); expected.add(kv13); expected.add(kv12); @@ -3269,7 +3266,7 @@ public class TestHRegion { Scan scan = new Scan(row1); int versions = 3; scan.setMaxVersions(versions); - List actual = new ArrayList(); + List actual = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(actual); @@ -3334,7 +3331,7 @@ public class TestHRegion { scan.setLoadColumnFamiliesOnDemand(true); InternalScanner s = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); assertTrue(s.next(results)); assertEquals(results.size(), 1); results.clear(); @@ -3427,7 +3424,7 @@ public class TestHRegion { // r8: first:a // r9: first:a - List results = new ArrayList(); + List results = new ArrayList<>(); int index = 0; ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(3).build(); while (true) { @@ -3501,7 +3498,7 @@ public class TestHRegion { new BinaryComparator(Bytes.toBytes(5L)))); int expectedCount = 0; - List res = new ArrayList(); + List res = new ArrayList<>(); boolean toggle = true; for (long i = 0; i < numRows; i++) { @@ -3643,7 +3640,7 @@ public class TestHRegion { Scan scan = new Scan(Bytes.toBytes("row0"), Bytes.toBytes("row1")); int expectedCount = numFamilies * numQualifiers; - List res = new ArrayList(); + List res = new ArrayList<>(); long prevTimestamp = 0L; for (int i = 0; i < testCount; i++) { @@ -3943,7 +3940,7 @@ public class TestHRegion { new BinaryComparator(Bytes.toBytes(0L))), new SingleColumnValueFilter(family, qual1, CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L)))))); InternalScanner scanner = region.getScanner(idxScan); - List res = new ArrayList(); + List res = new ArrayList<>(); while (scanner.next(res)) ; @@ -4841,7 +4838,7 @@ public class TestHRegion { scan.addFamily(families[i]); InternalScanner s = r.getScanner(scan); try { - List curVals = 
new ArrayList(); + List curVals = new ArrayList<>(); boolean first = true; OUTER_LOOP: while (s.next(curVals)) { for (Cell kv : curVals) { @@ -5000,7 +4997,7 @@ public class TestHRegion { scan.setMaxVersions(5); scan.setReversed(true); InternalScanner scanner = region.getScanner(scan); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); boolean hasNext = scanner.next(currRow); assertEquals(2, currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow @@ -5056,7 +5053,7 @@ public class TestHRegion { region.put(put); Scan scan = new Scan(rowD); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); scan.setReversed(true); scan.setMaxVersions(5); InternalScanner scanner = region.getScanner(scan); @@ -5113,7 +5110,7 @@ public class TestHRegion { put.add(kv3); region.put(put); Scan scan = new Scan(); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); scan.setReversed(true); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); @@ -5184,7 +5181,7 @@ public class TestHRegion { Scan scan = new Scan(rowD, rowA); scan.addColumn(families[0], col1); scan.setReversed(true); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); @@ -5267,7 +5264,7 @@ public class TestHRegion { Scan scan = new Scan(rowD, rowA); scan.addColumn(families[0], col1); scan.setReversed(true); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); InternalScanner scanner = region.getScanner(scan); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); @@ -5412,7 +5409,7 @@ public class TestHRegion { scan.setBatch(3); scan.setReversed(true); InternalScanner scanner = region.getScanner(scan); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); boolean hasNext = false; // 1. 
scan out "row4" (5 kvs), "row5" can't be scanned out since not // included in scan range @@ -5519,7 +5516,7 @@ public class TestHRegion { scan.setReversed(true); scan.setBatch(10); InternalScanner scanner = region.getScanner(scan); - List currRow = new ArrayList(); + List currRow = new ArrayList<>(); boolean hasNext = scanner.next(currRow); assertEquals(1, currRow.size()); assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow @@ -6348,7 +6345,7 @@ public class TestHRegion { CONF.setInt("hbase.regionserver.wal.disruptor.event.count", 2); this.region = initHRegion(tableName, method, CONF, families); try { - List threads = new ArrayList(); + List threads = new ArrayList<>(); for (int i = 0; i < numRows; i++) { final int count = i; Thread t = new Thread(new Runnable() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java index 23c96a20c86..41ad68bf5b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java @@ -159,7 +159,7 @@ public class TestHRegionOnCluster { put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value)); table.put(put); ResultScanner resultScanner = table.getScanner(new Scan()); - List results = new ArrayList(); + List results = new ArrayList<>(); while (true) { Result r = resultScanner.next(); if (r == null) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index ac10f8c39bb..005464262bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -179,7 +179,7 @@ public class TestHRegionReplayEvents { when(rss.getExecutorService()).thenReturn(es); primaryRegion = HRegion.createHRegion(primaryHri, rootDir, CONF, htd, walPrimary); primaryRegion.close(); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(primaryRegion); when(rss.getOnlineRegions()).thenReturn(regions); @@ -1391,7 +1391,7 @@ public class TestHRegionReplayEvents { // Test case 3: compact primary files primaryRegion.compactStores(); - List regions = new ArrayList(); + List regions = new ArrayList<>(); regions.add(primaryRegion); when(rss.getOnlineRegions()).thenReturn(regions); CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, rss, false); @@ -1486,11 +1486,10 @@ public class TestHRegionReplayEvents { random.nextBytes(randomValues); Path testPath = TEST_UTIL.getDataTestDirOnTestFS(); - List> familyPaths = new ArrayList>(); + List> familyPaths = new ArrayList<>(); int expectedLoadFileCount = 0; for (byte[] family : families) { - familyPaths.add(new Pair(family, createHFileForFamilies(testPath, family, - randomValues))); + familyPaths.add(new Pair<>(family, createHFileForFamilies(testPath, family, randomValues))); expectedLoadFileCount++; } primaryRegion.bulkLoadHFiles(familyPaths, false, null); @@ -1519,7 +1518,7 @@ public class TestHRegionReplayEvents { secondaryRegion.replayWALBulkLoadEventMarker(bulkloadEvent); - List storeFileName = new ArrayList(); + List storeFileName = new ArrayList<>(); for (StoreDescriptor storeDesc : 
bulkloadEvent.getStoresList()) { storeFileName.addAll(storeDesc.getStoreFileList()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 398711e446b..0ac51533898 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -108,7 +108,7 @@ public class TestHRegionServerBulkLoad { @Parameters public static final Collection parameters() { int[] sleepDurations = new int[] { 0, 30000 }; - List configurations = new ArrayList(); + List configurations = new ArrayList<>(); for (int i : sleepDurations) { configurations.add(new Object[] { i }); } @@ -189,8 +189,7 @@ public class TestHRegionServerBulkLoad { // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); byte[] val = Bytes.toBytes(String.format("%010d", iteration)); - final List> famPaths = new ArrayList>( - NUM_CFS); + final List> famPaths = new ArrayList<>(NUM_CFS); for (int i = 0; i < NUM_CFS; i++) { Path hfile = new Path(dir, family(i)); byte[] fam = Bytes.toBytes(family(i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java index f68fda97e5b..7aa1b31baa2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java @@ -80,8 +80,7 @@ public class TestHRegionServerBulkLoadWithOldClient extends TestHRegionServerBul // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); byte[] val = Bytes.toBytes(String.format("%010d", iteration)); - final List> famPaths = new ArrayList>( - NUM_CFS); + final List> famPaths = new ArrayList<>(NUM_CFS); for (int i = 0; i < NUM_CFS; i++) { Path hfile = new Path(dir, family(i)); byte[] fam = Bytes.toBytes(family(i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java index 00cc50a3183..83810f20245 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java @@ -113,7 +113,7 @@ public class TestJoinedScanners { byte [] val_large = new byte[valueWidth]; - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (long i = 0; i < rows_to_insert; i++) { Put put = new Put(Bytes.toBytes(Long.toString (i))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index 3e32772d1bc..d93152a6864 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -212,7 +212,7 @@ public class TestKeepDeletes { s.setRaw(true); s.setMaxVersions(); InternalScanner scan = region.getScanner(s); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); scan.next(kvs); 
assertEquals(2, kvs.size()); @@ -226,7 +226,7 @@ public class TestKeepDeletes { s.setRaw(true); s.setMaxVersions(); scan = region.getScanner(s); - kvs = new ArrayList(); + kvs = new ArrayList<>(); scan.next(kvs); assertTrue(kvs.isEmpty()); @@ -271,7 +271,7 @@ public class TestKeepDeletes { s.setMaxVersions(); s.setTimeRange(0L, ts+1); InternalScanner scanner = region.getScanner(s); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); while (scanner.next(kvs)) ; assertTrue(kvs.isEmpty()); @@ -346,7 +346,7 @@ public class TestKeepDeletes { s.setRaw(true); s.setMaxVersions(); InternalScanner scan = region.getScanner(s); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); scan.next(kvs); assertEquals(8, kvs.size()); assertTrue(CellUtil.isDeleteFamily(kvs.get(0))); @@ -365,7 +365,7 @@ public class TestKeepDeletes { s.setMaxVersions(); s.setTimeRange(0, 1); scan = region.getScanner(s); - kvs = new ArrayList(); + kvs = new ArrayList<>(); scan.next(kvs); // nothing in this interval, not even delete markers assertTrue(kvs.isEmpty()); @@ -376,7 +376,7 @@ public class TestKeepDeletes { s.setMaxVersions(); s.setTimeRange(0, ts+2); scan = region.getScanner(s); - kvs = new ArrayList(); + kvs = new ArrayList<>(); scan.next(kvs); assertEquals(4, kvs.size()); assertTrue(CellUtil.isDeleteFamily(kvs.get(0))); @@ -391,7 +391,7 @@ public class TestKeepDeletes { s.setMaxVersions(); s.setTimeRange(ts+3, ts+5); scan = region.getScanner(s); - kvs = new ArrayList(); + kvs = new ArrayList<>(); scan.next(kvs); assertEquals(2, kvs.size()); assertArrayEquals(CellUtil.cloneValue(kvs.get(0)), T3); @@ -794,7 +794,7 @@ public class TestKeepDeletes { Scan s = new Scan(T1); s.setTimeRange(0, ts+1); InternalScanner scanner = region.getScanner(s); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); scanner.next(kvs); assertEquals(4, kvs.size()); scanner.close(); @@ -802,7 +802,7 @@ public class TestKeepDeletes { s = new Scan(T2); s.setTimeRange(0, ts+2); scanner = region.getScanner(s); - kvs = new ArrayList(); + kvs = new ArrayList<>(); scanner.next(kvs); assertEquals(4, kvs.size()); scanner.close(); @@ -951,7 +951,7 @@ public class TestKeepDeletes { // use max versions from the store(s) s.setMaxVersions(region.getStores().iterator().next().getScanInfo().getMaxVersions()); InternalScanner scan = region.getScanner(s); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); int res = 0; boolean hasMore; do { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java index b030c7471f5..d574e75e424 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java @@ -67,7 +67,7 @@ public class TestKeyValueHeap extends HBaseTestCase { TestScanner s2 = new TestScanner(Arrays.asList(kv111, kv112)); TestScanner s3 = new TestScanner(Arrays.asList(kv113, kv114, kv121, kv122, kv213)); - List scanners = new ArrayList(Arrays.asList(s1, s2, s3)); + List scanners = new ArrayList<>(Arrays.asList(s1, s2, s3)); /* * Uses {@code scanners} to build a KeyValueHeap, iterates over it and asserts that returned @@ -136,7 +136,7 @@ public class TestKeyValueHeap extends HBaseTestCase { public void testScannerLeak() throws IOException { // Test for unclosed scanners (HBASE-1927) - TestScanner s4 = new TestScanner(new ArrayList()); + TestScanner s4 = new TestScanner(new 
ArrayList<>()); scanners.add(s4); //Creating KeyValueHeap @@ -163,9 +163,9 @@ public class TestKeyValueHeap extends HBaseTestCase { TestScanner s1 = new SeekTestScanner(Arrays.asList(kv115, kv211, kv212)); TestScanner s2 = new SeekTestScanner(Arrays.asList(kv111, kv112)); TestScanner s3 = new SeekTestScanner(Arrays.asList(kv113, kv114, kv121, kv122, kv213)); - TestScanner s4 = new SeekTestScanner(new ArrayList()); + TestScanner s4 = new SeekTestScanner(new ArrayList<>()); - List scanners = new ArrayList(Arrays.asList(s1, s2, s3, s4)); + List scanners = new ArrayList<>(Arrays.asList(s1, s2, s3, s4)); // Creating KeyValueHeap KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparator.COMPARATOR); @@ -197,13 +197,13 @@ public class TestKeyValueHeap extends HBaseTestCase { TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 1); TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 2); List expected = Arrays.asList(kv111, kv112, kv113B, kv113A); - assertCells(expected, new ArrayList(Arrays.asList(scan1, scan2))); + assertCells(expected, new ArrayList<>(Arrays.asList(scan1, scan2))); } { TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 2); TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 1); List expected = Arrays.asList(kv111, kv112, kv113A, kv113B); - assertCells(expected, new ArrayList(Arrays.asList(scan1, scan2))); + assertCells(expected, new ArrayList<>(Arrays.asList(scan1, scan2))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index edc46ca3def..9d00d3856f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -137,7 +137,7 @@ public class TestMajorCompaction { // Now delete everything. 
InternalScanner s = r.getScanner(new Scan()); do { - List results = new ArrayList(); + List results = new ArrayList<>(); boolean result = s.next(results); r.delete(new Delete(CellUtil.cloneRow(results.get(0)))); if (!result) break; @@ -150,7 +150,7 @@ public class TestMajorCompaction { s = r.getScanner(new Scan()); int counter = 0; do { - List results = new ArrayList(); + List results = new ArrayList<>(); boolean result = s.next(results); if (!result) break; counter++; @@ -180,8 +180,7 @@ public class TestMajorCompaction { public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly) throws Exception { - Map replaceBlockCache = - new HashMap(); + Map replaceBlockCache = new HashMap<>(); for (Store store : r.getStores()) { HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder(); replaceBlockCache.put(store, blockEncoder); @@ -461,7 +460,7 @@ public class TestMajorCompaction { scan.setReversed(true); InternalScanner s = r.getScanner(scan); do { - List results = new ArrayList(); + List results = new ArrayList<>(); boolean result = s.next(results); assertTrue(!results.isEmpty()); r.delete(new Delete(CellUtil.cloneRow(results.get(0)))); @@ -477,7 +476,7 @@ public class TestMajorCompaction { s = r.getScanner(scan); int counter = 0; do { - List results = new ArrayList(); + List results = new ArrayList<>(); boolean result = s.next(results); if (!result) break; counter++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java index c106c046091..141b8023e2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java @@ -197,7 +197,7 @@ public class TestMemStoreLAB { MemStoreChunkPool.clearDisableFlag(); mslab = new MemStoreLABImpl(conf); // launch multiple threads to trigger frequent chunk retirement - List threads = new ArrayList(); + List threads = new ArrayList<>(); final KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"), Bytes.toBytes("q"), new byte[MemStoreLABImpl.MAX_ALLOC_DEFAULT - 24]); for (int i = 0; i < 10; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 661583e08ab..52b5a409862 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -427,7 +427,7 @@ public class TestMinVersions { p.addColumn(c1, c1, T3); region.put(p); - List tss = new ArrayList(); + List tss = new ArrayList<>(); tss.add(ts-1); tss.add(ts-2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java index 15931c60213..7406e470487 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMiniBatchOperationInProgress.java @@ -40,10 +40,10 @@ public class TestMiniBatchOperationInProgress { OperationStatus[] retCodeDetails = new OperationStatus[10]; WALEdit[] walEditsFromCoprocessors = new WALEdit[10]; for (int i = 0; i < 10; i++) { - operations[i] = new Pair(new 
Put(Bytes.toBytes(i)), null); + operations[i] = new Pair<>(new Put(Bytes.toBytes(i)), null); } MiniBatchOperationInProgress> miniBatch = - new MiniBatchOperationInProgress>(operations, retCodeDetails, + new MiniBatchOperationInProgress<>(operations, retCodeDetails, walEditsFromCoprocessors, 0, 5); assertEquals(5, miniBatch.size()); @@ -68,7 +68,7 @@ public class TestMiniBatchOperationInProgress { } catch (ArrayIndexOutOfBoundsException e) { } - miniBatch = new MiniBatchOperationInProgress>(operations, + miniBatch = new MiniBatchOperationInProgress<>(operations, retCodeDetails, walEditsFromCoprocessors, 7, 10); try { miniBatch.setWalEdit(-1, new WALEdit()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index ff6f09b2645..8afdec923fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -124,7 +124,7 @@ public class TestMultiColumnScanner { @Parameters public static final Collection parameters() { - List parameters = new ArrayList(); + List parameters = new ArrayList<>(); for (Object[] bloomAndCompressionParams : HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS) { for (boolean useDataBlockEncoding : new boolean[]{false, true}) { @@ -154,15 +154,15 @@ public class TestMultiColumnScanner { ); List rows = sequentialStrings("row", NUM_ROWS); List qualifiers = sequentialStrings("qual", NUM_COLUMNS); - List kvs = new ArrayList(); - Set keySet = new HashSet(); + List kvs = new ArrayList<>(); + Set keySet = new HashSet<>(); // A map from _ to the most recent delete timestamp for // that column. - Map lastDelTimeMap = new HashMap(); + Map lastDelTimeMap = new HashMap<>(); Random rand = new Random(29372937L); - Set rowQualSkip = new HashSet(); + Set rowQualSkip = new HashSet<>(); // Skip some columns in some rows. We need to test scanning over a set // of columns when some of the columns are not there. 
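The MiniBatchOperationInProgress<> and Pair<> changes above show that the diamond is not limited to JDK collections; any generic class gets the same treatment. A small self-contained sketch using a made-up Batch<T> class, purely illustrative and not an HBase type:

public class DiamondUserTypeSketch {
  // Tiny stand-in for a generic framework type such as MiniBatchOperationInProgress<T>.
  static class Batch<T> {
    private final T[] operations;
    Batch(T[] operations) { this.operations = operations; }
    int size() { return operations.length; }
  }

  public static void main(String[] args) {
    String[] ops = { "put-0", "put-1", "put-2" };
    // Before: new Batch<String>(ops); with the diamond the compiler infers <String>.
    Batch<String> batch = new Batch<>(ops);
    System.out.println(batch.size()); // prints 3
  }
}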
@@ -228,7 +228,7 @@ public class TestMultiColumnScanner { for (int columnBitMask = 1; columnBitMask <= MAX_COLUMN_BIT_MASK; ++columnBitMask) { Scan scan = new Scan(); scan.setMaxVersions(maxVersions); - Set qualSet = new TreeSet(); + Set qualSet = new TreeSet<>(); { int columnMaskTmp = columnBitMask; for (String qual : qualifiers) { @@ -242,7 +242,7 @@ public class TestMultiColumnScanner { } InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); int kvPos = 0; int numResults = 0; @@ -317,7 +317,7 @@ public class TestMultiColumnScanner { } private static List sequentialStrings(String prefix, int n) { - List lst = new ArrayList(); + List lst = new ArrayList<>(); for (int i = 0; i < n; ++i) { StringBuilder sb = new StringBuilder(); sb.append(prefix + i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java index 67be03296b8..d8f75ed53f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java @@ -117,8 +117,7 @@ public class TestRegionFavoredNodes { List regions = server.getOnlineRegions(TABLE_NAME); for (Region region : regions) { ListfavoredNodes = - new ArrayList(3); + new ArrayList<>(3); String encodedRegionName = region.getRegionInfo().getEncodedName(); for (int j = 0; j < FAVORED_NODES_NUM; j++) { org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder b = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java index 1583bf84d53..5d11c0ece47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java @@ -193,7 +193,7 @@ public class TestRegionIncrement { threads[i].join(); } RegionScanner regionScanner = region.getScanner(new Scan()); - List cells = new ArrayList(THREAD_COUNT); + List cells = new ArrayList<>(THREAD_COUNT); while(regionScanner.next(cells)) continue; assertEquals(THREAD_COUNT, cells.size()); long total = 0; @@ -230,7 +230,7 @@ public class TestRegionIncrement { threads[i].join(); } RegionScanner regionScanner = region.getScanner(new Scan()); - List cells = new ArrayList(100); + List cells = new ArrayList<>(100); while(regionScanner.next(cells)) continue; assertEquals(THREAD_COUNT, cells.size()); long total = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 3e6d180fac2..358aabd74c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -382,11 +382,11 @@ public class TestRegionMergeTransactionOnCluster { List> currentRegionToServers = MetaTableAccessor.getTableRegionsAndLocations( TEST_UTIL.getConnection(), tableName); - List initialRegions = new ArrayList(); + List initialRegions = new ArrayList<>(); for (Pair p : initialRegionToServers) { initialRegions.add(p.getFirst()); 
} - List currentRegions = new ArrayList(); + List currentRegions = new ArrayList<>(); for (Pair p : currentRegionToServers) { currentRegions.add(p.getFirst()); } @@ -427,7 +427,7 @@ public class TestRegionMergeTransactionOnCluster { ADMIN.mergeRegionsAsync( regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false); - return new PairOfSameType(regionA, regionB); + return new PairOfSameType<>(regionA, regionB); } private void waitAndVerifyRegionNum(HMaster master, TableName tablename, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java index abe6c6ce02e..69b7581491a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java @@ -283,7 +283,7 @@ public class TestRegionReplicaFailover { admin.flush(table.getName()); HTU.loadNumericRows(table, fam, 1000, 2000); - final AtomicReference ex = new AtomicReference(null); + final AtomicReference ex = new AtomicReference<>(null); final AtomicBoolean done = new AtomicBoolean(false); final AtomicInteger key = new AtomicInteger(2000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index 91661018585..642cc14c924 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -329,7 +329,7 @@ public class TestRegionReplicas { @SuppressWarnings("unchecked") final AtomicReference[] exceptions = new AtomicReference[3]; for (int i=0; i < exceptions.length; i++) { - exceptions[i] = new AtomicReference(); + exceptions[i] = new AtomicReference<>(); } Runnable writer = new Runnable() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java index b2909e2aaf8..89f7589cc76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java @@ -63,7 +63,7 @@ public class TestRegionSplitPolicy { mockRegion = Mockito.mock(HRegion.class); Mockito.doReturn(htd).when(mockRegion).getTableDesc(); Mockito.doReturn(hri).when(mockRegion).getRegionInfo(); - stores = new ArrayList(); + stores = new ArrayList<>(); Mockito.doReturn(stores).when(mockRegion).getStores(); } @@ -103,7 +103,7 @@ public class TestRegionSplitPolicy { // Now make it so the mock region has a RegionServerService that will // return 'online regions'. RegionServerServices rss = Mockito.mock(RegionServerServices.class); - final List regions = new ArrayList(); + final List regions = new ArrayList<>(); Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions); Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss); // Set max size for this 'table'. 
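The AtomicReference hunk above keeps new AtomicReference[3] raw while switching the element constructors to new AtomicReference<>(). That split is forced by the language: generic array creation is illegal, so only the element instantiations can use the diamond. A JDK-only sketch of the pattern; the exception value is illustrative:

import java.util.concurrent.atomic.AtomicReference;

public class DiamondAndGenericArraysSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // new AtomicReference<Throwable>[3] would not compile (generic array creation),
    // so the array itself stays raw and the assignment is an unchecked conversion...
    AtomicReference<Throwable>[] exceptions = new AtomicReference[3];
    // ...but each element can still be created with the diamond.
    for (int i = 0; i < exceptions.length; i++) {
      exceptions[i] = new AtomicReference<>();
    }
    exceptions[0].set(new IllegalStateException("illustrative failure"));
    System.out.println(exceptions[0].get().getMessage());
  }
}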
@@ -162,7 +162,7 @@ public class TestRegionSplitPolicy { conf.setFloat("hbase.busy.policy.blockedRequests", 0.1f); RegionServerServices rss = Mockito.mock(RegionServerServices.class); - final List regions = new ArrayList(); + final List regions = new ArrayList<>(); Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions); Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss); Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(0L); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java index ea16edf0175..69965ba3b39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java @@ -435,7 +435,7 @@ public class TestReversibleScanners { private void verifyCountAndOrder(InternalScanner scanner, int expectedKVCount, int expectedRowCount, boolean forward) throws IOException { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); Result lastResult = null; int rowCount = 0; int kvCount = 0; @@ -502,8 +502,7 @@ public class TestReversibleScanners { .getScannersForStoreFiles(Lists.newArrayList(sf1, sf2), false, true, false, false, readPoint); List memScanners = memstore.getScanners(readPoint); - List scanners = new ArrayList( - fileScanners.size() + 1); + List scanners = new ArrayList<>(fileScanners.size() + 1); scanners.addAll(fileScanners); scanners.addAll(memScanners); @@ -611,7 +610,7 @@ public class TestReversibleScanners { for (int i = startRowNum; i >= 0; i--) { for (int j = (i == startRowNum ? startQualNum : 0); j < QUALSIZE; j++) { if (makeMVCC(i, j) <= readPoint) { - nextReadableNum = new Pair(i, j); + nextReadableNum = new Pair<>(i, j); findExpected = true; break; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java index 8a31af8f6e9..4cf2964e7af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSCVFWithMiniCluster.java @@ -85,7 +85,7 @@ public class TestSCVFWithMiniCluster { htable = util.getConnection().getTable(HBASE_TABLE_NAME); /* Add some values */ - List puts = new ArrayList(); + List puts = new ArrayList<>(); /* Add a row with 'a:foo' = false */ Put put = new Put(Bytes.toBytes("1")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 027193f4748..72267beecb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -74,7 +74,7 @@ public class TestScanWithBloomError { private static final String ROW = "theRow"; private static final String QUALIFIER_PREFIX = "qual"; private static final byte[] ROW_BYTES = Bytes.toBytes(ROW); - private static NavigableSet allColIds = new TreeSet(); + private static NavigableSet allColIds = new TreeSet<>(); private Region region; private BloomType bloomType; private FileSystem fs; @@ -84,7 +84,7 @@ public class TestScanWithBloomError { @Parameters public 
static final Collection parameters() { - List configurations = new ArrayList(); + List configurations = new ArrayList<>(); for (BloomType bloomType : BloomType.values()) { configurations.add(new Object[] { bloomType }); } @@ -160,24 +160,24 @@ public class TestScanWithBloomError { + lastStoreFileReader.getHFileReader().getName()); lastStoreFileReader.disableBloomFilterForTesting(); - List allResults = new ArrayList(); + List allResults = new ArrayList<>(); { // Limit the scope of results. - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results) || results.size() > 0) { allResults.addAll(results); results.clear(); } } - List actualIds = new ArrayList(); + List actualIds = new ArrayList<>(); for (Cell kv : allResults) { String qual = Bytes.toString(CellUtil.cloneQualifier(kv)); assertTrue(qual.startsWith(QUALIFIER_PREFIX)); actualIds.add(Integer.valueOf(qual.substring( QUALIFIER_PREFIX.length()))); } - List expectedIds = new ArrayList(); + List expectedIds = new ArrayList<>(); for (int expectedId : expectedResultCols) expectedIds.add(expectedId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 1b427542eb5..d9fd3dea216 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -132,7 +132,7 @@ public class TestScanner { try { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY); - List results = new ArrayList(); + List results = new ArrayList<>(); // Do simple test of getting one row only first. Scan scan = new Scan(Bytes.toBytes("abc"), Bytes.toBytes("abd")); scan.addFamily(HConstants.CATALOG_FAMILY); @@ -151,7 +151,7 @@ public class TestScanner { s = region.getScanner(scan); count = 0; Cell kv = null; - results = new ArrayList(); + results = new ArrayList<>(); for (boolean first = true; s.next(results);) { kv = results.get(0); if (first) { @@ -170,7 +170,7 @@ public class TestScanner { } void rowPrefixFilter(Scan scan) throws IOException { - List results = new ArrayList(); + List results = new ArrayList<>(); scan.addFamily(HConstants.CATALOG_FAMILY); InternalScanner s = region.getScanner(scan); boolean hasMore = true; @@ -186,7 +186,7 @@ public class TestScanner { } void rowInclusiveStopFilter(Scan scan, byte[] stopRow) throws IOException { - List results = new ArrayList(); + List results = new ArrayList<>(); scan.addFamily(HConstants.CATALOG_FAMILY); InternalScanner s = region.getScanner(scan); boolean hasMore = true; @@ -234,7 +234,7 @@ public class TestScanner { HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY); Scan scan = new Scan(); InternalScanner s = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); try { s.next(results); s.close(); @@ -376,7 +376,7 @@ public class TestScanner { throws IOException { InternalScanner scanner = null; Scan scan = null; - List results = new ArrayList(); + List results = new ArrayList<>(); byte [][][] scanColumns = { COLS, EXPLICIT_COLS @@ -540,7 +540,7 @@ public class TestScanner { // run a major compact, column1 of firstRow will be cleaned. 
region.compact(true); - List results = new ArrayList(); + List results = new ArrayList<>(); s.next(results); // make sure returns column2 of firstRow @@ -549,7 +549,7 @@ public class TestScanner { assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); assertTrue(CellUtil.matchingFamily(results.get(0), fam2)); - results = new ArrayList(); + results = new ArrayList<>(); s.next(results); // get secondRow diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java index ac33f15573e..9bda019e41e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java @@ -128,7 +128,7 @@ public class TestScannerRetriableFailure { } public void loadTable(final Table table, int numRows) throws IOException { - List puts = new ArrayList(numRows); + List puts = new ArrayList<>(numRows); for (int i = 0; i < numRows; ++i) { byte[] row = Bytes.toBytes(String.format("%09d", i)); Put put = new Put(row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index b31be9daecd..67f6f34ae0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -111,9 +111,9 @@ public class TestSeekOptimizations { private Put put; private Delete del; private Random rand; - private Set putTimestamps = new HashSet(); - private Set delTimestamps = new HashSet(); - private List expectedKVs = new ArrayList(); + private Set putTimestamps = new HashSet<>(); + private Set delTimestamps = new HashSet<>(); + private List expectedKVs = new ArrayList<>(); private Compression.Algorithm comprAlgo; private BloomType bloomType; @@ -199,7 +199,7 @@ public class TestSeekOptimizations { throws IOException { StoreScanner.enableLazySeekGlobally(lazySeekEnabled); final Scan scan = new Scan(); - final Set qualSet = new HashSet(); + final Set qualSet = new HashSet<>(); for (int iColumn : columnArr) { String qualStr = getQualStr(iColumn); scan.addColumn(FAMILY_BYTES, Bytes.toBytes(qualStr)); @@ -217,8 +217,8 @@ public class TestSeekOptimizations { final long initialSeekCount = StoreFileScanner.getSeekCount(); final InternalScanner scanner = region.getScanner(scan); - final List results = new ArrayList(); - final List actualKVs = new ArrayList(); + final List results = new ArrayList<>(); + final List actualKVs = new ArrayList<>(); // Such a clumsy do-while loop appears to be the official way to use an // internalScanner. 
scanner.next() return value refers to the _next_ @@ -260,8 +260,8 @@ public class TestSeekOptimizations { private List filterExpectedResults(Set qualSet, byte[] startRow, byte[] endRow, int maxVersions) { - final List filteredKVs = new ArrayList(); - final Map verCount = new HashMap(); + final List filteredKVs = new ArrayList<>(); + final Map verCount = new HashMap<>(); for (Cell kv : expectedKVs) { if (startRow.length > 0 && Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), @@ -297,7 +297,7 @@ public class TestSeekOptimizations { } private void prepareExpectedKVs(long latestDelTS) { - final List filteredKVs = new ArrayList(); + final List filteredKVs = new ArrayList<>(); for (Cell kv : expectedKVs) { if (kv.getTimestamp() > latestDelTS || latestDelTS == -1) { filteredKVs.add(kv); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 46c1dd5a7c4..4c0981070f3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -112,11 +112,10 @@ public class TestStore { byte [] qf5 = Bytes.toBytes("qf5"); byte [] qf6 = Bytes.toBytes("qf6"); - NavigableSet qualifiers = - new ConcurrentSkipListSet(Bytes.BYTES_COMPARATOR); + NavigableSet qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); - List expected = new ArrayList(); - List result = new ArrayList(); + List expected = new ArrayList<>(); + List result = new ArrayList<>(); long id = System.currentTimeMillis(); Get get = new Get(row); @@ -624,8 +623,7 @@ public class TestStore { * only; thereafter it will succeed. Used by {@link TestHRegion} too. 
*/ static class FaultyFileSystem extends FilterFileSystem { - List> outStreams = - new ArrayList>(); + List> outStreams = new ArrayList<>(); private long faultPos = 200; AtomicBoolean fault = new AtomicBoolean(true); @@ -699,7 +697,7 @@ public class TestStore { */ List getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier, byte[] family) { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); for (int i=1;i<=numRows;i++) { byte[] b = Bytes.toBytes(i); for (long timestamp: timestamps) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index 0a8dbc416e0..7e4ebd8e61d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -512,7 +512,7 @@ public class TestStoreFile extends HBaseTestCase { int falseNeg = 0; for (int i = 0; i < 2000; i++) { String row = String.format(localFormatter, i); - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); columns.add("family:col".getBytes()); Scan scan = new Scan(row.getBytes(),row.getBytes()); @@ -712,7 +712,7 @@ public class TestStoreFile extends HBaseTestCase { for (int j = 0; j < colCount*2; ++j) { // column qualifiers String row = String.format(localFormatter, i); String col = String.format(localFormatter, j); - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); columns.add(("col" + col).getBytes()); Scan scan = new Scan(row.getBytes(),row.getBytes()); @@ -799,7 +799,7 @@ public class TestStoreFile extends HBaseTestCase { */ List getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier, byte[] family) { - List kvList = new ArrayList(); + List kvList = new ArrayList<>(); for (int i=1;i<=numRows;i++) { byte[] b = Bytes.toBytes(i) ; LOG.info(Bytes.toString(b)); @@ -851,7 +851,7 @@ public class TestStoreFile extends HBaseTestCase { when(store.getFamily()).thenReturn(hcd); StoreFileReader reader = hsf.createReader(); StoreFileScanner scanner = getStoreFileScanner(reader, false, false); - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); columns.add(qualifier); scan.setTimeRange(20, 100); @@ -1019,7 +1019,7 @@ public class TestStoreFile extends HBaseTestCase { throws IOException { // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs int numKVs = 5 * numBlocks; - List kvs = new ArrayList(numKVs); + List kvs = new ArrayList<>(numKVs); byte [] b = Bytes.toBytes("x"); int totalSize = 0; for (int i=numKVs;i>0;i--) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index d96fd9ffc1c..3cdb227dbcb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -170,7 +170,7 @@ public class TestStoreFileRefresherChore { byte[] qf = Bytes.toBytes("cq"); HRegionServer regionServer = mock(HRegionServer.class); - List regions = new ArrayList(); + List regions = new ArrayList<>(); when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); 
when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 52efe63a1c0..ccbf06786ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -173,7 +173,7 @@ public class TestStoreScanner { * @return */ NavigableSet getCols(String ...strCols) { - NavigableSet cols = new TreeSet(Bytes.BYTES_COMPARATOR); + NavigableSet cols = new TreeSet<>(Bytes.BYTES_COMPARATOR); for (String col : strCols) { byte[] bytes = Bytes.toBytes(col); cols.add(bytes); @@ -189,7 +189,7 @@ public class TestStoreScanner { Scan scan = new Scan(get); CellGridStoreScanner scanner = new CellGridStoreScanner(scan, this.scanInfo, this.scanType); try { - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results)) { continue; } @@ -213,7 +213,7 @@ public class TestStoreScanner { Scan scan = new Scan(get); CellGridStoreScanner scanner = new CellGridStoreScanner(scan, this.scanInfo, this.scanType); try { - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results)) { continue; } @@ -242,7 +242,7 @@ public class TestStoreScanner { scan.addColumn(CF, ONE); CellGridStoreScanner scanner = new CellGridStoreScanner(scan, this.scanInfo, this.scanType); try { - List results = new ArrayList(); + List results = new ArrayList<>(); while (scanner.next(results)) { continue; } @@ -276,7 +276,7 @@ public class TestStoreScanner { Scan scan = new Scan(get); CellGridStoreScanner scanner = new CellGridStoreScanner(scan, this.scanInfo, this.scanType); try { - List results = new ArrayList(); + List results = new ArrayList<>(); // For a Get there should be no more next's after the first call. Assert.assertEquals(false, scanner.next(results)); // Should be one result only. @@ -307,7 +307,7 @@ public class TestStoreScanner { Scan scan = new Scan(get); CellGridStoreScanner scanner = new CellGridStoreScanner(scan, this.scanInfo, this.scanType); try { - List results = new ArrayList(); + List results = new ArrayList<>(); // For a Get there should be no more next's after the first call. Assert.assertEquals(false, scanner.next(results)); // Should be one result only. 
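Several hunks above pass a comparator while dropping the explicit element type, as in new TreeSet<>(Bytes.BYTES_COMPARATOR). A JDK-only sketch of that shape, with String.CASE_INSENSITIVE_ORDER standing in for Bytes.BYTES_COMPARATOR:

import java.util.NavigableSet;
import java.util.TreeSet;

public class DiamondWithComparatorSketch {
  public static void main(String[] args) {
    // Only the comparator is passed; the element type String is inferred,
    // here from both the declared target type and the comparator's own type.
    NavigableSet<String> cols = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
    cols.add("Qual2");
    cols.add("qual1");
    System.out.println(cols.first()); // qual1 under case-insensitive ordering
  }
}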
@@ -341,7 +341,7 @@ public class TestStoreScanner { List results = null; try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - results = new ArrayList(); + results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(5, results.size()); Assert.assertEquals(kvs[kvs.length - 1], results.get(0)); @@ -352,7 +352,7 @@ public class TestStoreScanner { scanSpec.setMaxVersions(); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - results = new ArrayList(); + results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(2, results.size()); } @@ -362,7 +362,7 @@ public class TestStoreScanner { scanSpec.setMaxVersions(); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - results = new ArrayList(); + results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(1, results.size()); } @@ -373,7 +373,7 @@ public class TestStoreScanner { scanSpec.setMaxVersions(3); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - results = new ArrayList(); + results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(3, results.size()); } @@ -395,7 +395,7 @@ public class TestStoreScanner { // this only uses maxVersions (default=1) and TimeRange (default=all) try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(1, results.size()); Assert.assertEquals(kvs[0], results.get(0)); @@ -423,7 +423,7 @@ public class TestStoreScanner { // this only uses maxVersions (default=1) and TimeRange (default=all) try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); scan.next(results); Assert.assertEquals(1, results.size()); Assert.assertEquals(kvs[0], results.get(0)); @@ -451,7 +451,7 @@ public class TestStoreScanner { Scan scanSpec = new Scan(Bytes.toBytes("R1")); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertFalse(scan.next(results)); Assert.assertEquals(0, results.size()); } @@ -472,7 +472,7 @@ public class TestStoreScanner { Scan scanSpec = new Scan(Bytes.toBytes("R1")); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(0, results.size()); @@ -499,7 +499,7 @@ public class TestStoreScanner { try (StoreScanner scan = new StoreScanner(new Scan(Bytes.toBytes("R1")), scanInfo, scanType, getCols("a"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); // the two put at ts=now will be masked by the 1 delete, and // since the scan default returns 1 version we'll return the newest // key, which is kvs[2], now-100. 
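A bit further down, the patch also uses the diamond directly in argument position, for example scan.updateReaders(new ArrayList<>()). That form leans on Java 8's improved target-type inference; under Java 7's more limited inference such calls could fail to pick the intended element type. A JDK-only sketch, where consume() is a hypothetical method rather than an HBase API:

import java.util.ArrayList;
import java.util.List;

public class DiamondInArgumentPositionSketch {
  // Hypothetical stand-in for calls like scan.updateReaders(new ArrayList<>()).
  static int consume(List<String> items) {
    return items.size();
  }

  public static void main(String[] args) {
    // Under Java 8 target typing, <> is inferred as <String> from the
    // parameter type of consume(); no intermediate local variable is needed.
    System.out.println(consume(new ArrayList<>())); // prints 0
  }
}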
@@ -525,7 +525,7 @@ public class TestStoreScanner { Scan scanSpec = new Scan(Bytes.toBytes("R1")).setMaxVersions(2); try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(2, results.size()); Assert.assertEquals(kvs2[1], results.get(0)); @@ -543,7 +543,7 @@ public class TestStoreScanner { List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan(Bytes.toBytes("R1")), scanInfo, scanType, null, scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(2, results.size()); Assert.assertEquals(kvs[0], results.get(0)); @@ -574,7 +574,7 @@ public class TestStoreScanner { List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan().setMaxVersions(2), scanInfo, scanType, null, scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(5, results.size()); Assert.assertEquals(kvs[0], results.get(0)); @@ -605,7 +605,7 @@ public class TestStoreScanner { try (StoreScanner scan = new StoreScanner(new Scan().setMaxVersions(Integer.MAX_VALUE), scanInfo, scanType, null, scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(0, results.size()); Assert.assertEquals(true, scan.next(results)); @@ -627,7 +627,7 @@ public class TestStoreScanner { List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan(), scanInfo, scanType, null, scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(1, results.size()); Assert.assertEquals(kvs[3], results.get(0)); @@ -652,7 +652,7 @@ public class TestStoreScanner { List scanners = scanFixture(kvs); try (StoreScanner scan = new StoreScanner(new Scan(), scanInfo, scanType, getCols("a", "d"), scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scan.next(results)); Assert.assertEquals(2, results.size()); Assert.assertEquals(kvs[0], results.get(0)); @@ -692,7 +692,7 @@ public class TestStoreScanner { CellComparator.COMPARATOR); ScanType scanType = ScanType.USER_SCAN; try (StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scanner.next(results)); Assert.assertEquals(2, results.size()); Assert.assertEquals(kvs[1], results.get(0)); @@ -720,9 +720,9 @@ public class TestStoreScanner { // normally cause an NPE because scan.store is null. So as long as we get through these // two calls we are good and the bug was quashed. 
- scan.updateReaders(new ArrayList()); + scan.updateReaders(new ArrayList<>()); - scan.updateReaders(new ArrayList()); + scan.updateReaders(new ArrayList<>()); scan.peek(); } @@ -767,7 +767,7 @@ public class TestStoreScanner { try (StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners)) { - List results = new ArrayList(); + List results = new ArrayList<>(); Assert.assertEquals(true, scanner.next(results)); Assert.assertEquals(1, results.size()); Assert.assertEquals(kvs[1], results.get(0)); @@ -833,8 +833,8 @@ public class TestStoreScanner { new StoreScanner(scan, scanInfo, ScanType.COMPACT_DROP_DELETES, null, scanners, HConstants.OLDEST_TIMESTAMP)) { - List results = new ArrayList(); - results = new ArrayList(); + List results = new ArrayList<>(); + results = new ArrayList<>(); Assert.assertEquals(true, scanner.next(results)); Assert.assertEquals(kvs[0], results.get(0)); Assert.assertEquals(kvs[2], results.get(1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java index 3e3eef98d66..b2739e1e07d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java @@ -78,7 +78,7 @@ public class TestStripeStoreEngine { mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(), any(byte[].class), any(byte[].class), any(byte[].class), any(byte[].class), any(ThroughputController.class), any(User.class))) - .thenReturn(new ArrayList()); + .thenReturn(new ArrayList<>()); // Produce 3 L0 files. StoreFile sf = createFile(); @@ -118,6 +118,6 @@ public class TestStripeStoreEngine { } private static ArrayList al(StoreFile... sfs) { - return new ArrayList(Arrays.asList(sfs)); + return new ArrayList<>(Arrays.asList(sfs)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java index c533257892d..a6ce270a8d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java @@ -115,7 +115,7 @@ public class TestStripeStoreFileManager { } private static ArrayList dumpIterator(Iterator iter) { - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); for (; iter.hasNext(); result.add(iter.next())); return result; } @@ -233,7 +233,7 @@ public class TestStripeStoreFileManager { private void verifySplitPointScenario(int splitPointAfter, boolean shouldSplitStripe, float splitRatioToVerify, int... sizes) throws Exception { assertTrue(sizes.length > 1); - ArrayList sfs = new ArrayList(); + ArrayList sfs = new ArrayList<>(); for (int sizeIx = 0; sizeIx < sizes.length; ++sizeIx) { byte[] startKey = (sizeIx == 0) ? OPEN_KEY : Bytes.toBytes(sizeIx - 1); byte[] endKey = (sizeIx == sizes.length - 1) ? OPEN_KEY : Bytes.toBytes(sizeIx); @@ -525,7 +525,7 @@ public class TestStripeStoreFileManager { sfm.insertNewFiles(al(createFile())); } for (int i = 0; i < filesInStripe; ++i) { - ArrayList stripe = new ArrayList(); + ArrayList stripe = new ArrayList<>(); for (int j = 0; j < stripes; ++j) { stripe.add(createFile( (j == 0) ? OPEN_KEY : keys[j - 1], (j == stripes - 1) ? 
OPEN_KEY : keys[j])); @@ -597,7 +597,7 @@ public class TestStripeStoreFileManager { } private static StripeStoreFileManager createManager() throws Exception { - return createManager(new ArrayList()); + return createManager(new ArrayList<>()); } private static StripeStoreFileManager createManager(ArrayList sfs) throws Exception { @@ -615,11 +615,11 @@ public class TestStripeStoreFileManager { } private static ArrayList al(StoreFile... sfs) { - return new ArrayList(Arrays.asList(sfs)); + return new ArrayList<>(Arrays.asList(sfs)); } private static ArrayList flattenLists(ArrayList... sfls) { - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); for (ArrayList sfl : sfls) { result.addAll(sfl); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 4d531ace94a..40eebb69717 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -421,7 +421,7 @@ public class TestTags { assertEquals(5L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength())); assertEquals(2, tags.size()); // We cannot assume the ordering of tags - List tagValues = new ArrayList(); + List tagValues = new ArrayList<>(); for (Tag tag: tags) { tagValues.add(Bytes.toString(TagUtil.cloneValue(tag))); } @@ -557,7 +557,7 @@ public class TestTags { private void updateMutationAddingTags(final Mutation m) { byte[] attribute = m.getAttribute("visibility"); byte[] cf = null; - List updatedCells = new ArrayList(); + List updatedCells = new ArrayList<>(); if (attribute != null) { for (List edits : m.getFamilyCellMap().values()) { for (Cell cell : edits) { @@ -566,7 +566,7 @@ public class TestTags { cf = CellUtil.cloneFamily(kv); } Tag tag = new ArrayBackedTag((byte) 1, attribute); - List tagList = new ArrayList(); + List tagList = new ArrayList<>(); tagList.add(tag); KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 4821c74e38d..4f247b0cb37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -218,7 +218,7 @@ public class TestWALLockup { HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME); final HRegion region = initHRegion(tableName, null, null, dodgyWAL); byte [] bytes = Bytes.toBytes(getName()); - NavigableMap scopes = new TreeMap( + NavigableMap scopes = new TreeMap<>( Bytes.BYTES_COMPARATOR); scopes.put(COLUMN_FAMILY_BYTES, 0); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); @@ -398,7 +398,7 @@ public class TestWALLockup { HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME); final HRegion region = initHRegion(tableName, null, null, dodgyWAL1); byte[] bytes = Bytes.toBytes(getName()); - NavigableMap scopes = new TreeMap( + NavigableMap scopes = new TreeMap<>( Bytes.BYTES_COMPARATOR); scopes.put(COLUMN_FAMILY_BYTES, 0); MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index f598a8dfab7..cdf84d29900 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -94,7 +94,7 @@ public class TestWideScanner extends HBaseTestCase { try { this.r = createNewHRegion(TESTTABLEDESC, null, null); int inserted = addWideContent(this.r); - List results = new ArrayList(); + List results = new ArrayList<>(); Scan scan = new Scan(); scan.addFamily(A); scan.addFamily(B); @@ -130,7 +130,7 @@ public class TestWideScanner extends HBaseTestCase { ((HRegion.RegionScannerImpl)s).storeHeap.getHeap().iterator(); while (scanners.hasNext()) { StoreScanner ss = (StoreScanner)scanners.next(); - ss.updateReaders(new ArrayList()); + ss.updateReaders(new ArrayList<>()); } } while (more); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java index 68d57afd7ca..5014b414153 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/ConstantSizeFileListGenerator.java @@ -49,7 +49,7 @@ class ConstantSizeFileListGenerator extends StoreFileListGenerator { @Override public List next() { count += 1; - ArrayList files = new ArrayList(NUM_FILES_GEN); + ArrayList files = new ArrayList<>(NUM_FILES_GEN); for (int i = 0; i < NUM_FILES_GEN; i++) { files.add(createMockStoreFile(FILESIZE)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java index 9a4bb8e5499..46bb639182a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/EverythingPolicy.java @@ -46,9 +46,9 @@ public class EverythingPolicy extends RatioBasedCompactionPolicy { final boolean mayUseOffPeak, final boolean mayBeStuck) throws IOException { if (candidates.size() < comConf.getMinFilesToCompact()) { - return new ArrayList(0); + return new ArrayList<>(0); } - return new ArrayList(candidates); + return new ArrayList<>(candidates); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java index a19e9ad8dc8..fb8c30a3ffe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/GaussianFileListGenerator.java @@ -47,7 +47,7 @@ class GaussianFileListGenerator extends StoreFileListGenerator { @Override public List next() { count += 1; - ArrayList files = new ArrayList(NUM_FILES_GEN); + ArrayList files = new ArrayList<>(NUM_FILES_GEN); for (int i = 0; i < NUM_FILES_GEN; i++) { files.add(createMockStoreFile( (int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32))) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java index 663714a1d83..cb97d276818 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java @@ -46,7 +46,7 @@ class MockStoreFileGenerator { } protected List createStoreFileList(final int[] fs) { - List storeFiles = new LinkedList(); + List storeFiles = new LinkedList<>(); for (int fileSize : fs) { storeFiles.add(createMockStoreFile(fileSize)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 3fcd3fe27a4..0a84fe96e84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -82,7 +82,7 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator { int[] minFilesValues = new int[] {3}; float[] ratioValues = new float[] {1.2f}; - List params = new ArrayList( + List params = new ArrayList<>( maxFileValues.length * minFilesValues.length * fileListGenClasses.length @@ -152,7 +152,7 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator { public final void testSelection() throws Exception { long fileDiff = 0; for (List storeFileList : generator) { - List currentFiles = new ArrayList(18); + List currentFiles = new ArrayList<>(18); for (StoreFile file : storeFileList) { currentFiles.add(file); currentFiles = runIteration(currentFiles); @@ -175,16 +175,16 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator { private List runIteration(List startingStoreFiles) throws IOException { - List storeFiles = new ArrayList(startingStoreFiles); + List storeFiles = new ArrayList<>(startingStoreFiles); CompactionRequest req = cp.selectCompaction( - storeFiles, new ArrayList(), false, false, false); + storeFiles, new ArrayList<>(), false, false, false); long newFileSize = 0; Collection filesToCompact = req.getFiles(); if (!filesToCompact.isEmpty()) { - storeFiles = new ArrayList(storeFiles); + storeFiles = new ArrayList<>(storeFiles); storeFiles.removeAll(filesToCompact); for (StoreFile storeFile : filesToCompact) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java index ed4531a3101..5fe47f3444f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SemiConstantSizeFileListGenerator.java @@ -42,7 +42,7 @@ class SemiConstantSizeFileListGenerator extends StoreFileListGenerator { @Override public List next() { count += 1; - ArrayList files = new ArrayList(NUM_FILES_GEN); + ArrayList files = new ArrayList<>(NUM_FILES_GEN); for (int i = 0; i < NUM_FILES_GEN; i++) { files.add(createMockStoreFile(random.nextInt(5) + 30)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java index 6afbb2f2de6..f5f36ac84d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SinusoidalFileListGenerator.java @@ -43,7 +43,7 @@ class SinusoidalFileListGenerator extends StoreFileListGenerator { @Override public List next() { count += 1; - ArrayList files = new ArrayList(NUM_FILES_GEN); + ArrayList files = new ArrayList<>(NUM_FILES_GEN); for (int x = 0; x < NUM_FILES_GEN; x++) { int fileSize = (int) Math.abs(64 * Math.sin((Math.PI * x) / 50.0)) + 1; files.add(createMockStoreFile(fileSize)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java index ebaa7115a14..5201eb790ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/SpikyFileListGenerator.java @@ -43,7 +43,7 @@ class SpikyFileListGenerator extends StoreFileListGenerator { @Override public List next() { count += 1; - ArrayList files = new ArrayList(NUM_FILES_GEN); + ArrayList files = new ArrayList<>(NUM_FILES_GEN); for (int x = 0; x < NUM_FILES_GEN; x++) { int fileSize = random.nextInt(5) + 1; if ( x % 10 == 0) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java index aa5a20e158f..08fc7bff8d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java @@ -77,7 +77,7 @@ public class TestCompactedHFilesDischarger { Path path = testUtil.getDataTestDir(getClass().getSimpleName()); region = HBaseTestingUtility.createRegionAndWAL(info, path, testUtil.getConfiguration(), htd); rss = mock(RegionServerServices.class); - List regions = new ArrayList(1); + List regions = new ArrayList<>(1); regions.add(region); when(rss.getOnlineRegions()).thenReturn(regions); } @@ -379,7 +379,7 @@ public class TestCompactedHFilesDischarger { RegionScanner resScanner = null; try { resScanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); boolean next = resScanner.next(results); try { counter.incrementAndGet(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java index 89f61d0ddf8..dff6919ca14 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java @@ -82,12 +82,12 @@ public class TestCompactor { public static class StoreFileWritersCapture implements Answer, StripeMultiFileWriter.WriterFactory { public static class Writer { - public ArrayList kvs = new ArrayList(); - public TreeMap data = new TreeMap(Bytes.BYTES_COMPARATOR); + public ArrayList kvs = new ArrayList<>(); + public 
TreeMap data = new TreeMap<>(Bytes.BYTES_COMPARATOR); public boolean hasMetadata; } - private List writers = new ArrayList(); + private List writers = new ArrayList<>(); @Override public StoreFileWriter createWriter() throws IOException { @@ -192,7 +192,7 @@ public class TestCompactor { private final ArrayList kvs; public Scanner(KeyValue... kvs) { - this.kvs = new ArrayList(Arrays.asList(kvs)); + this.kvs = new ArrayList<>(Arrays.asList(kvs)); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java index 38d9f9923b8..e5906396ad5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java @@ -156,7 +156,7 @@ public class TestDateTieredCompactor { StoreFileWritersCapture writers = new StoreFileWritersCapture(); CompactionRequest request = createDummyRequest(); DateTieredCompactor dtc = createCompactor(writers, new KeyValue[0], - new ArrayList(request.getFiles())); + new ArrayList<>(request.getFiles())); List paths = dtc.compact(request, Arrays.asList(Long.MIN_VALUE, Long.MAX_VALUE), NoLimitThroughputController.INSTANCE, null); assertEquals(1, paths.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index 5fadee82778..f2d00b3490c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -415,7 +415,7 @@ public class TestStripeCompactionPolicy { private static StripeCompactionPolicy.StripeInformationProvider createStripesWithFiles( List... stripeFiles) throws Exception { return createStripesWithFiles(createBoundaries(stripeFiles.length), - Lists.newArrayList(stripeFiles), new ArrayList()); + Lists.newArrayList(stripeFiles), new ArrayList<>()); } @Test @@ -433,7 +433,7 @@ public class TestStripeCompactionPolicy { verifySingleStripeCompaction(policy, si, 0, false); // Unless there are enough to cause L0 compaction. si = createStripesWithSizes(6, 2, stripes); - ConcatenatedLists sfs = new ConcatenatedLists(); + ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addSublist(si.getLevel0Files()); sfs.addSublist(si.getStripes().get(0)); verifyCompaction( @@ -446,7 +446,7 @@ public class TestStripeCompactionPolicy { // if all files of stripe aren't selected, delete must not be dropped. stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } }; si = createStripesWithSizes(0, 0, stripes); - List compact_file = new ArrayList(); + List compact_file = new ArrayList<>(); Iterator iter = si.getStripes().get(0).listIterator(1); while (iter.hasNext()) { compact_file.add(iter.next()); @@ -472,7 +472,7 @@ public class TestStripeCompactionPolicy { } private static ArrayList al(StoreFile... 
sfs) { - return new ArrayList(Arrays.asList(sfs)); + return new ArrayList<>(Arrays.asList(sfs)); } private void verifyMergeCompatcion(StripeCompactionPolicy policy, StripeInformationProvider si, @@ -619,7 +619,7 @@ public class TestStripeCompactionPolicy { private static List getAllFiles( StripeInformationProvider si, int fromStripe, int toStripe) { - ArrayList expected = new ArrayList(); + ArrayList expected = new ArrayList<>(); for (int i = fromStripe; i <= toStripe; ++i) { expected.addAll(si.getStripes().get(i)); } @@ -633,11 +633,11 @@ public class TestStripeCompactionPolicy { */ private static StripeInformationProvider createStripes( int l0Count, byte[]... boundaries) throws Exception { - List l0Sizes = new ArrayList(); + List l0Sizes = new ArrayList<>(); for (int i = 0; i < l0Count; ++i) { l0Sizes.add(5L); } - List> sizes = new ArrayList>(); + List> sizes = new ArrayList<>(); for (int i = 0; i <= boundaries.length; ++i) { sizes.add(Arrays.asList(Long.valueOf(5))); } @@ -651,11 +651,11 @@ public class TestStripeCompactionPolicy { */ private static StripeInformationProvider createStripesL0Only( int l0Count, long l0Size) throws Exception { - List l0Sizes = new ArrayList(); + List l0Sizes = new ArrayList<>(); for (int i = 0; i < l0Count; ++i) { l0Sizes.add(l0Size); } - return createStripes(null, new ArrayList>(), l0Sizes); + return createStripes(null, new ArrayList<>(), l0Sizes); } /** @@ -666,7 +666,7 @@ public class TestStripeCompactionPolicy { */ private static StripeInformationProvider createStripesWithSizes( int l0Count, long l0Size, Long[]... sizes) throws Exception { - ArrayList> sizeList = new ArrayList>(sizes.length); + ArrayList> sizeList = new ArrayList<>(sizes.length); for (Long[] size : sizes) { sizeList.add(Arrays.asList(size)); } @@ -676,7 +676,7 @@ public class TestStripeCompactionPolicy { private static StripeInformationProvider createStripesWithSizes( int l0Count, long l0Size, List> sizes) throws Exception { List boundaries = createBoundaries(sizes.size()); - List l0Sizes = new ArrayList(); + List l0Sizes = new ArrayList<>(); for (int i = 0; i < l0Count; ++i) { l0Sizes.add(l0Size); } @@ -686,22 +686,22 @@ public class TestStripeCompactionPolicy { private static List createBoundaries(int stripeCount) { byte[][] keys = new byte[][] { KEY_A, KEY_B, KEY_C, KEY_D, KEY_E }; assert stripeCount <= keys.length + 1; - List boundaries = new ArrayList(); + List boundaries = new ArrayList<>(); boundaries.addAll(Arrays.asList(keys).subList(0, stripeCount - 1)); return boundaries; } private static StripeInformationProvider createStripes(List boundaries, List> stripeSizes, List l0Sizes) throws Exception { - List> stripeFiles = new ArrayList>(stripeSizes.size()); + List> stripeFiles = new ArrayList<>(stripeSizes.size()); for (List sizes : stripeSizes) { - List sfs = new ArrayList(sizes.size()); + List sfs = new ArrayList<>(sizes.size()); for (Long size : sizes) { sfs.add(createFile(size)); } stripeFiles.add(sfs); } - List l0Files = new ArrayList(); + List l0Files = new ArrayList<>(); for (Long size : l0Sizes) { l0Files.add(createFile(size)); } @@ -713,8 +713,8 @@ public class TestStripeCompactionPolicy { */ private static StripeInformationProvider createStripesWithFiles(List boundaries, List> stripeFiles, List l0Files) throws Exception { - ArrayList> stripes = new ArrayList>(); - ArrayList boundariesList = new ArrayList(); + ArrayList> stripes = new ArrayList<>(); + ArrayList boundariesList = new ArrayList<>(); StripeInformationProvider si = mock(StripeInformationProvider.class); if 
(!stripeFiles.isEmpty()) { assert stripeFiles.size() == (boundaries.size() + 1); @@ -731,7 +731,7 @@ public class TestStripeCompactionPolicy { when(si.getEndRow(eq(i))).thenReturn(endKey); } } - ConcatenatedLists sfs = new ConcatenatedLists(); + ConcatenatedLists sfs = new ConcatenatedLists<>(); sfs.addAllSublists(stripes); sfs.addSublist(l0Files); when(si.getStorefiles()).thenReturn(sfs); @@ -803,7 +803,7 @@ public class TestStripeCompactionPolicy { private final ArrayList kvs; public Scanner(KeyValue... kvs) { - this.kvs = new ArrayList(Arrays.asList(kvs)); + this.kvs = new ArrayList<>(Arrays.asList(kvs)); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java index 4b82940ef6d..088c9586252 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java @@ -177,7 +177,7 @@ public class TestStripeCompactor { null, NoLimitThroughputController.INSTANCE, null); assertEquals(output.length, paths.size()); writers.verifyKvs(output, true, true); - List boundaries = new ArrayList(output.length + 2); + List boundaries = new ArrayList<>(output.length + 2); boundaries.add(left); for (int i = 1; i < output.length; ++i) { boundaries.add(CellUtil.cloneRow(output[i][0])); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java index 055fe1c6517..af8c27dc9d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java @@ -79,8 +79,7 @@ public class TestCompactionScanQueryMatcher extends AbstractTestScanQueryMatcher CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo, ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP, HConstants.OLDEST_TIMESTAMP, now, from, to, null); - List actual = new ArrayList( - rows.length); + List actual = new ArrayList<>(rows.length); byte[] prevRow = null; for (byte[] row : rows) { if (prevRow == null || !Bytes.equals(prevRow, row)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java index 34805976aba..4e07f802faf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java @@ -49,7 +49,7 @@ public class TestExplicitColumnTracker { ColumnTracker exp = new ExplicitColumnTracker(trackColumns, 0, maxVersions, Long.MIN_VALUE); // Initialize result - List result = new ArrayList(scannerColumns.size()); + List result = new ArrayList<>(scannerColumns.size()); long timestamp = 0; // "Match" @@ -67,11 +67,11 @@ public class TestExplicitColumnTracker { @Test public void testGetSingleVersion() throws IOException { // Create tracker - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = 
new TreeSet<>(Bytes.BYTES_COMPARATOR); // Looking for every other columns.add(col2); columns.add(col4); - List expected = new ArrayList(5); + List expected = new ArrayList<>(5); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); // col1 expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL); // col2 expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); // col3 @@ -80,7 +80,7 @@ public class TestExplicitColumnTracker { int maxVersions = 1; // Create "Scanner" - List scanner = new ArrayList(5); + List scanner = new ArrayList<>(5); scanner.add(col1); scanner.add(col2); scanner.add(col3); @@ -93,12 +93,12 @@ public class TestExplicitColumnTracker { @Test public void testGetMultiVersion() throws IOException { // Create tracker - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); // Looking for every other columns.add(col2); columns.add(col4); - List expected = new ArrayList(15); + List expected = new ArrayList<>(15); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); @@ -121,7 +121,7 @@ public class TestExplicitColumnTracker { int maxVersions = 2; // Create "Scanner" - List scanner = new ArrayList(15); + List scanner = new ArrayList<>(15); scanner.add(col1); scanner.add(col1); scanner.add(col1); @@ -148,7 +148,7 @@ public class TestExplicitColumnTracker { @Test public void testStackOverflow() throws IOException { int maxVersions = 1; - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < 100000; i++) { columns.add(Bytes.toBytes("col" + i)); } @@ -173,7 +173,7 @@ public class TestExplicitColumnTracker { */ @Test public void testInfiniteLoop() throws IOException { - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); + TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); columns.addAll(Arrays.asList(new byte[][] { col2, col3, col5 })); List scanner = Arrays. asList(new byte[][] { col1, col4 }); List expected = Arrays. 
asList( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java index 28529470202..6d6e58e6a71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java @@ -44,20 +44,20 @@ public class TestScanWildcardColumnTracker { ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE); // Create list of qualifiers - List qualifiers = new ArrayList(4); + List qualifiers = new ArrayList<>(4); qualifiers.add(Bytes.toBytes("qualifier1")); qualifiers.add(Bytes.toBytes("qualifier2")); qualifiers.add(Bytes.toBytes("qualifier3")); qualifiers.add(Bytes.toBytes("qualifier4")); // Setting up expected result - List expected = new ArrayList(4); + List expected = new ArrayList<>(4); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); - List actual = new ArrayList(qualifiers.size()); + List actual = new ArrayList<>(qualifiers.size()); for (byte[] qualifier : qualifiers) { ScanQueryMatcher.MatchCode mc = ScanQueryMatcher.checkColumn(tracker, qualifier, 0, @@ -76,20 +76,20 @@ public class TestScanWildcardColumnTracker { ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE); // Create list of qualifiers - List qualifiers = new ArrayList(4); + List qualifiers = new ArrayList<>(4); qualifiers.add(Bytes.toBytes("qualifier1")); qualifiers.add(Bytes.toBytes("qualifier1")); qualifiers.add(Bytes.toBytes("qualifier1")); qualifiers.add(Bytes.toBytes("qualifier2")); // Setting up expected result - List expected = new ArrayList(4); + List expected = new ArrayList<>(4); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); - List actual = new ArrayList(qualifiers.size()); + List actual = new ArrayList<>(qualifiers.size()); long timestamp = 0; for (byte[] qualifier : qualifiers) { @@ -109,7 +109,7 @@ public class TestScanWildcardColumnTracker { ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE); // Create list of qualifiers - List qualifiers = new ArrayList(2); + List qualifiers = new ArrayList<>(2); qualifiers.add(Bytes.toBytes("qualifier2")); qualifiers.add(Bytes.toBytes("qualifier1")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java index 08314049527..b4e4311ed09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java @@ -69,7 +69,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { // of just byte [] // Expected result - List expected = new ArrayList(6); + List expected = new ArrayList<>(6); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); 
expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL); expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); @@ -83,7 +83,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator), get.getFamilyMap().get(fam2), now - ttl, now, null); - List memstore = new ArrayList(6); + List memstore = new ArrayList<>(6); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); memstore.add(new KeyValue(row1, fam2, col2, 1, data)); memstore.add(new KeyValue(row1, fam2, col3, 1, data)); @@ -92,7 +92,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { memstore.add(new KeyValue(row2, fam1, col1, data)); - List actual = new ArrayList(memstore.size()); + List actual = new ArrayList<>(memstore.size()); KeyValue k = memstore.get(0); qm.setToNewRow(k); @@ -113,7 +113,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { // of just byte [] // Expected result - List expected = new ArrayList(6); + List expected = new ArrayList<>(6); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.INCLUDE); @@ -126,7 +126,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator), null, now - ttl, now, null); - List memstore = new ArrayList(6); + List memstore = new ArrayList<>(6); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); memstore.add(new KeyValue(row1, fam2, col2, 1, data)); memstore.add(new KeyValue(row1, fam2, col3, 1, data)); @@ -134,7 +134,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { memstore.add(new KeyValue(row1, fam2, col5, 1, data)); memstore.add(new KeyValue(row2, fam1, col1, 1, data)); - List actual = new ArrayList(memstore.size()); + List actual = new ArrayList<>(memstore.size()); KeyValue k = memstore.get(0); qm.setToNewRow(k); @@ -181,7 +181,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { KeyValue k = kvs[0]; qm.setToNewRow(k); - List actual = new ArrayList(kvs.length); + List actual = new ArrayList<>(kvs.length); for (KeyValue kv : kvs) { actual.add(qm.match(kv)); } @@ -222,7 +222,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { KeyValue k = kvs[0]; qm.setToNewRow(k); - List actual = new ArrayList(kvs.length); + List actual = new ArrayList<>(kvs.length); for (KeyValue kv : kvs) { actual.add(qm.match(kv)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index fb0b514274b..0be7b3157c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -255,11 +255,11 @@ public abstract class AbstractTestFSWAL { new HRegionInfo(t2.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); // add edits and roll the wal MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); - NavigableMap scopes1 = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : t1.getFamiliesKeys()) { scopes1.put(fam, 0); } - NavigableMap scopes2 = new TreeMap(Bytes.BYTES_COMPARATOR); + 
NavigableMap scopes2 = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : t2.getFamiliesKeys()) { scopes2.put(fam, 0); } @@ -362,7 +362,7 @@ public abstract class AbstractTestFSWAL { HBaseTestingUtility.closeRegionAndWAL(r); final int countPerFamily = 10; final AtomicBoolean goslow = new AtomicBoolean(false); - NavigableMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -402,7 +402,7 @@ public abstract class AbstractTestFSWAL { } } // Add any old cluster id. - List clusterIds = new ArrayList(1); + List clusterIds = new ArrayList<>(1); clusterIds.add(UUID.randomUUID()); // Now make appends run slow. goslow.set(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java index f70bcc836e1..04a4bbc0b0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java @@ -130,7 +130,7 @@ public abstract class AbstractTestLogRollPeriod { private void checkMinLogRolls(final WAL log, final int minRolls) throws Exception { - final List paths = new ArrayList(); + final List paths = new ArrayList<>(); log.registerWALActionsListener(new WALActionsListener.Base() { @Override public void postLogRoll(Path oldFile, Path newFile) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 90eacf0a042..237d24a2678 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -299,8 +299,7 @@ public abstract class AbstractTestWALReplay { // Add 1k to each family. 
final int countPerFamily = 1000; - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -366,7 +365,7 @@ public abstract class AbstractTestWALReplay { Path f = new Path(basedir, "hfile"); HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(""), Bytes.toBytes("z"), 10); - List> hfs = new ArrayList>(1); + List> hfs = new ArrayList<>(1); hfs.add(Pair.newPair(family, f.toString())); region.bulkLoadHFiles(hfs, true, null); @@ -434,7 +433,7 @@ public abstract class AbstractTestWALReplay { region.put((new Put(row)).addColumn(family, family, family)); wal.sync(); - List > hfs= new ArrayList>(1); + List > hfs= new ArrayList<>(1); for (int i = 0; i < 3; i++) { Path f = new Path(basedir, "hfile"+i); HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(i + "00"), @@ -700,8 +699,7 @@ public abstract class AbstractTestWALReplay { HRegion region = HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null); int writtenRowCount = 10; - List families = new ArrayList( - htd.getFamilies()); + List families = new ArrayList<>(htd.getFamilies()); for (int i = 0; i < writtenRowCount; i++) { Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i))); put.addColumn(families.get(i % families.size()).getName(), Bytes.toBytes("q"), @@ -759,7 +757,7 @@ public abstract class AbstractTestWALReplay { private int getScannedCount(RegionScanner scanner) throws IOException { int scannedCount = 0; - List results = new ArrayList(); + List results = new ArrayList<>(); while (true) { boolean existMore = scanner.next(results); if (!results.isEmpty()) @@ -794,9 +792,8 @@ public abstract class AbstractTestWALReplay { // Add 1k to each family. 
final int countPerFamily = 1000; - Set familyNames = new HashSet(); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + Set familyNames = new HashSet<>(); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -1048,7 +1045,7 @@ public abstract class AbstractTestWALReplay { deleteDir(basedir); final HTableDescriptor htd = createBasic1FamilyHTD(tableName); - NavigableMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -1196,7 +1193,7 @@ public abstract class AbstractTestWALReplay { static List addRegionEdits(final byte[] rowName, final byte[] family, final int count, EnvironmentEdge ee, final Region r, final String qualifierPrefix) throws IOException { - List puts = new ArrayList(); + List puts = new ArrayList<>(); for (int j = 0; j < count; j++) { byte[] qualifier = Bytes.toBytes(qualifierPrefix + Integer.toString(j)); Put p = new Put(rowName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java index c654c16cdb5..1af21d27519 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java @@ -31,7 +31,7 @@ public class FaultyProtobufLogReader extends ProtobufLogReader { BEGINNING, MIDDLE, END, NONE } - Queue nextQueue = new LinkedList(); + Queue nextQueue = new LinkedList<>(); int numberOfFileEntries = 0; FailureType getFailureType() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index 9e546e66678..8847c4c0d85 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -102,7 +102,7 @@ public class TestFSHLog extends AbstractTestFSWAL { syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(this.name.getMethodName())).addFamily(new HColumnDescriptor("row")); - NavigableMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java index 104f897898e..4a256a6b78e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestKeyValueCompression.java @@ -107,7 +107,7 @@ public class TestKeyValueCompression { byte[] cf = Bytes.toBytes("myCF"); byte[] q = Bytes.toBytes("myQualifier"); byte[] value = Bytes.toBytes("myValue"); - List tags = new ArrayList(noOfTags); + List tags = new ArrayList<>(noOfTags); for (int i = 1; i <= noOfTags; i++) { tags.add(new ArrayBackedTag((byte) i, Bytes.toBytes("tagValue" + i))); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 22395c8ad62..ccb00c71fdf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -199,8 +199,7 @@ public class TestLogRollAbort { kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("column")); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 19c534e3ce9..5bc4c9b6fe4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -245,8 +245,8 @@ public class TestLogRolling extends AbstractTestLogRolling { server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo(); final WAL log = server.getWAL(region); - final List paths = new ArrayList(1); - final List preLogRolledCalled = new ArrayList(); + final List paths = new ArrayList<>(1); + final List preLogRolledCalled = new ArrayList<>(); paths.add(AbstractFSWALProvider.getCurrentFileName(log)); log.registerWALActionsListener(new WALActionsListener.Base() { @@ -307,7 +307,7 @@ public class TestLogRolling extends AbstractTestLogRolling { preLogRolledCalled.size() >= 1); // read back the data written - Set loggedRows = new HashSet(); + Set loggedRows = new HashSet<>(); FSUtils fsUtils = FSUtils.getInstance(fs, TEST_UTIL.getConfiguration()); for (Path p : paths) { LOG.debug("recovering lease for " + p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 7412128d5b4..d3d582c2465 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -157,8 +157,7 @@ public class TestLogRollingNoCluster { edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; final HTableDescriptor htd = TEST_UTIL.getMetaTableDescriptor(); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java index 9fd0cb11059..9f5acbdfc28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSequenceIdAccounting.java @@ -38,7 
+38,7 @@ public class TestSequenceIdAccounting { private static final byte [] FAMILY_NAME = Bytes.toBytes("cf"); private static final Set FAMILIES; static { - FAMILIES = new HashSet(); + FAMILIES = new HashSet<>(); FAMILIES.add(FAMILY_NAME); } @@ -46,7 +46,7 @@ public class TestSequenceIdAccounting { public void testStartCacheFlush() { SequenceIdAccounting sida = new SequenceIdAccounting(); sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); - Map m = new HashMap(); + Map m = new HashMap<>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); assertEquals(HConstants.NO_SEQNUM, (long)sida.startCacheFlush(ENCODED_REGION_NAME, FAMILIES)); sida.completeCacheFlush(ENCODED_REGION_NAME); @@ -57,7 +57,7 @@ public class TestSequenceIdAccounting { sida.completeCacheFlush(ENCODED_REGION_NAME); long currentSequenceId = sequenceid; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); - final Set otherFamily = new HashSet(1); + final Set otherFamily = new HashSet<>(1); otherFamily.add(Bytes.toBytes("otherCf")); sida.update(ENCODED_REGION_NAME, FAMILIES, ++sequenceid, true); // Should return oldest sequence id in the region. @@ -69,7 +69,7 @@ public class TestSequenceIdAccounting { public void testAreAllLower() { SequenceIdAccounting sida = new SequenceIdAccounting(); sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); - Map m = new HashMap(); + Map m = new HashMap<>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); assertTrue(sida.areAllLower(m)); long sequenceid = 1; @@ -117,7 +117,7 @@ public class TestSequenceIdAccounting { public void testFindLower() { SequenceIdAccounting sida = new SequenceIdAccounting(); sida.getOrCreateLowestSequenceIds(ENCODED_REGION_NAME); - Map m = new HashMap(); + Map m = new HashMap<>(); m.put(ENCODED_REGION_NAME, HConstants.NO_SEQNUM); long sequenceid = 1; sida.update(ENCODED_REGION_NAME, FAMILIES, sequenceid, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java index 9ac9f0e48e7..9f9e2df3ba5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java @@ -95,7 +95,7 @@ public class TestWALActionsListener { @Test public void testActionListener() throws Exception { DummyWALActionsListener observer = new DummyWALActionsListener(); - List list = new ArrayList(1); + List list = new ArrayList<>(1); list.add(observer); final WALFactory wals = new WALFactory(conf, list, "testActionListener"); DummyWALActionsListener laterobserver = new DummyWALActionsListener(); @@ -110,8 +110,7 @@ public class TestWALActionsListener { edit.add(kv); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(SOME_BYTES)); htd.addFamily(new HColumnDescriptor(b)); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java index c4329b88733..8a246be4e6e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALCellCodecWithCompression.java @@ -102,7 +102,7 @@ public class TestWALCellCodecWithCompression { byte[] cf = Bytes.toBytes("myCF"); byte[] q = Bytes.toBytes("myQualifier"); byte[] value = Bytes.toBytes("myValue"); - List tags = new ArrayList(noOfTags); + List tags = new ArrayList<>(noOfTags); for (int i = 1; i <= noOfTags; i++) { tags.add(new ArrayBackedTag((byte) i, Bytes.toBytes("tagValue" + i))); } @@ -114,7 +114,7 @@ public class TestWALCellCodecWithCompression { byte[] cf = Bytes.toBytes("myCF"); byte[] q = Bytes.toBytes("myQualifier"); byte[] value = Bytes.toBytes("myValue"); - List tags = new ArrayList(noOfTags); + List tags = new ArrayList<>(noOfTags); for (int i = 1; i <= noOfTags; i++) { tags.add(new ArrayBackedTag((byte) i, Bytes.toBytes("tagValue" + i))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java index 381456230e7..e296f8704d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java @@ -165,7 +165,7 @@ public class TestNamespaceReplication extends TestReplicationBase { namespaces.add(ns2); rpc.setNamespaces(namespaces); Map> tableCfs = new HashMap<>(); - tableCfs.put(tabAName, new ArrayList()); + tableCfs.put(tabAName, new ArrayList<>()); tableCfs.get(tabAName).add("f1"); rpc.setTableCFsMap(tableCfs); admin.updatePeerConfig("2", rpc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java index a7a4cd82d50..abf2db3a1ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java @@ -283,7 +283,7 @@ public class TestPerTableCFReplication { // 1. 
null or empty string, result should be null assertNull(ReplicationSerDeHelper.convert(tabCFsMap)); - tabCFsMap = new HashMap>(); + tabCFsMap = new HashMap<>(); tableCFs = ReplicationSerDeHelper.convert(tabCFsMap); assertEquals(0, tableCFs.length); @@ -301,7 +301,7 @@ public class TestPerTableCFReplication { assertEquals(0, tableCFs[0].getFamiliesCount()); tabCFsMap.clear(); - tabCFsMap.put(tableName2, new ArrayList()); + tabCFsMap.put(tableName2, new ArrayList<>()); tabCFsMap.get(tableName2).add("cf1"); tableCFs = ReplicationSerDeHelper.convert(tabCFsMap); assertEquals(1, tableCFs.length); // only one table @@ -311,7 +311,7 @@ public class TestPerTableCFReplication { assertEquals("cf1", tableCFs[0].getFamilies(0).toStringUtf8()); tabCFsMap.clear(); - tabCFsMap.put(tableName3, new ArrayList()); + tabCFsMap.put(tableName3, new ArrayList<>()); tabCFsMap.get(tableName3).add("cf1"); tabCFsMap.get(tableName3).add("cf3"); tableCFs = ReplicationSerDeHelper.convert(tabCFsMap); @@ -324,9 +324,9 @@ public class TestPerTableCFReplication { tabCFsMap.clear(); tabCFsMap.put(tableName1, null); - tabCFsMap.put(tableName2, new ArrayList()); + tabCFsMap.put(tableName2, new ArrayList<>()); tabCFsMap.get(tableName2).add("cf1"); - tabCFsMap.put(tableName3, new ArrayList()); + tabCFsMap.put(tableName3, new ArrayList<>()); tabCFsMap.get(tableName3).add("cf1"); tabCFsMap.get(tableName3).add("cf3"); @@ -406,7 +406,7 @@ public class TestPerTableCFReplication { rpc2.setClusterKey(utility2.getClusterKey()); Map> tableCFs = new HashMap<>(); tableCFs.put(tabCName, null); - tableCFs.put(tabBName, new ArrayList()); + tableCFs.put(tabBName, new ArrayList<>()); tableCFs.get(tabBName).add("f1"); tableCFs.get(tabBName).add("f3"); replicationAdmin.addPeer("2", rpc2, tableCFs); @@ -415,7 +415,7 @@ public class TestPerTableCFReplication { rpc3.setClusterKey(utility3.getClusterKey()); tableCFs.clear(); tableCFs.put(tabAName, null); - tableCFs.put(tabBName, new ArrayList()); + tableCFs.put(tabBName, new ArrayList<>()); tableCFs.get(tabBName).add("f1"); tableCFs.get(tabBName).add("f2"); replicationAdmin.addPeer("3", rpc3, tableCFs); @@ -462,17 +462,17 @@ public class TestPerTableCFReplication { // B. 
change peers' replicable table-cf config tableCFs.clear(); - tableCFs.put(tabAName, new ArrayList()); + tableCFs.put(tabAName, new ArrayList<>()); tableCFs.get(tabAName).add("f1"); tableCFs.get(tabAName).add("f2"); - tableCFs.put(tabCName, new ArrayList()); + tableCFs.put(tabCName, new ArrayList<>()); tableCFs.get(tabCName).add("f2"); tableCFs.get(tabCName).add("f3"); replicationAdmin.setPeerTableCFs("2", tableCFs); tableCFs.clear(); tableCFs.put(tabBName, null); - tableCFs.put(tabCName, new ArrayList()); + tableCFs.put(tabCName, new ArrayList<>()); tableCFs.get(tabCName).add("f3"); replicationAdmin.setPeerTableCFs("3", tableCFs); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 474039b91b7..caad544d8ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -142,8 +142,7 @@ public class TestReplicationBase { table.addFamily(fam); fam = new HColumnDescriptor(noRepfamName); table.addFamily(fam); - scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(HColumnDescriptor f : table.getColumnFamilies()) { scopes.put(f.getName(), f.getScope()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index 5e8d569ab12..4925aab12ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -106,7 +106,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { @Override public String explainFailure() throws Exception { - List logRollInProgressRsList = new ArrayList(); + List logRollInProgressRsList = new ArrayList<>(); for (RegionServerThread rs : rsThreads) { if (!rs.getRegionServer().walRollRequestFinished()) { logRollInProgressRsList.add(rs.getRegionServer().toString()); @@ -462,7 +462,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { public static class ReplicationEndpointReturningFalse extends ReplicationEndpointForTest { static int COUNT = 10; - static AtomicReference ex = new AtomicReference(null); + static AtomicReference ex = new AtomicReference<>(null); static AtomicBoolean replicated = new AtomicBoolean(false); @Override public boolean replicate(ReplicateContext replicateContext) { @@ -483,7 +483,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { // return a WALEntry filter which only accepts "row", but not other rows public static class ReplicationEndpointWithWALEntryFilter extends ReplicationEndpointForTest { - static AtomicReference ex = new AtomicReference(null); + static AtomicReference ex = new AtomicReference<>(null); @Override public boolean replicate(ReplicateContext replicateContext) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index 9536f9ff0f0..1c5a994bfcb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -413,7 +413,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { @Test(timeout=300000) public void testLoading() throws Exception { LOG.info("Writing out rows to table1 in testLoading"); - List puts = new ArrayList(NB_ROWS_IN_BIG_BATCH); + List puts = new ArrayList<>(NB_ROWS_IN_BIG_BATCH); for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) { Put put = new Put(Bytes.toBytes(i)); put.addColumn(famName, row, row); @@ -519,8 +519,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { fam.setMaxVersions(100); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); table.addFamily(fam); - scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (HColumnDescriptor f : table.getColumnFamilies()) { scopes.put(f.getName(), f.getScope()); } @@ -818,7 +817,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { HRegion region = utility1.getMiniHBaseCluster().getRegions(tableName).get(0); HRegionInfo hri = region.getRegionInfo(); - NavigableMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (byte[] fam : htable1.getTableDescriptor().getFamiliesKeys()) { scopes.put(fam, 1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index c7c1b89df8a..15d15b3dafd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -205,9 +205,9 @@ public abstract class TestReplicationStateBasic { rqc.init(); List> files1 = new ArrayList<>(3); - files1.add(new Pair(null, new Path("file_1"))); - files1.add(new Pair(null, new Path("file_2"))); - files1.add(new Pair(null, new Path("file_3"))); + files1.add(new Pair<>(null, new Path("file_1"))); + files1.add(new Pair<>(null, new Path("file_2"))); + files1.add(new Pair<>(null, new Path("file_3"))); assertNull(rqc.getReplicableHFiles(ID_ONE)); assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size()); rp.registerPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE)); @@ -241,9 +241,9 @@ public abstract class TestReplicationStateBasic { rq1.addPeerToHFileRefs(ID_TWO); List> files1 = new ArrayList<>(3); - files1.add(new Pair(null, new Path("file_1"))); - files1.add(new Pair(null, new Path("file_2"))); - files1.add(new Pair(null, new Path("file_3"))); + files1.add(new Pair<>(null, new Path("file_1"))); + files1.add(new Pair<>(null, new Path("file_2"))); + files1.add(new Pair<>(null, new Path("file_3"))); rq1.addHFileRefs(ID_ONE, files1); rq1.addHFileRefs(ID_TWO, files1); assertEquals(2, rqc.getAllPeersFromHFileRefsQueue().size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java index e61ceb23491..9ec9b999ee3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java @@ -277,7 +277,7 @@ public class TestReplicationSyncUpTool extends TestReplicationBase { LOG.debug("mimicSyncUpAfterDelete"); 
utility2.shutdownMiniHBaseCluster(); - List list = new ArrayList(); + List list = new ArrayList<>(); // delete half of the rows for (int i = 0; i < NB_ROWS_IN_BATCH / 2; i++) { String rowKey = "row" + i; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index 0222513f984..388b6cce03d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -99,7 +99,7 @@ public class TestReplicationTrackerZKImpl { rsRemovedCount = new AtomicInteger(0); rsRemovedData = ""; plChangedCount = new AtomicInteger(0); - plChangedData = new ArrayList(); + plChangedData = new ArrayList<>(); peerRemovedCount = new AtomicInteger(0); peerRemovedData = ""; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index 2dbacafbc11..8ea0baed774 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -93,19 +93,19 @@ public class TestReplicationWALEntryFilters { assertEquals(null, filter.filter(userEntry)); // empty scopes - TreeMap scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); userEntry = createEntry(scopes, a, b); assertEquals(null, filter.filter(userEntry)); // different scope - scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(c, HConstants.REPLICATION_SCOPE_GLOBAL); userEntry = createEntry(scopes, a, b); // all kvs should be filtered assertEquals(userEntryEmpty, filter.filter(userEntry)); // local scope - scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(a, HConstants.REPLICATION_SCOPE_LOCAL); userEntry = createEntry(scopes, a, b); assertEquals(userEntryEmpty, filter.filter(userEntry)); @@ -113,7 +113,7 @@ public class TestReplicationWALEntryFilters { assertEquals(userEntryEmpty, filter.filter(userEntry)); // only scope a - scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(a, HConstants.REPLICATION_SCOPE_GLOBAL); userEntry = createEntry(scopes, a, b); assertEquals(userEntryA, filter.filter(userEntry)); @@ -121,7 +121,7 @@ public class TestReplicationWALEntryFilters { assertEquals(userEntryA, filter.filter(userEntry)); // only scope b - scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(b, HConstants.REPLICATION_SCOPE_GLOBAL); userEntry = createEntry(scopes, a, b); assertEquals(userEntryB, filter.filter(userEntry)); @@ -129,7 +129,7 @@ public class TestReplicationWALEntryFilters { assertEquals(userEntryB, filter.filter(userEntry)); // scope a and b - scopes = new TreeMap(Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(b, HConstants.REPLICATION_SCOPE_GLOBAL); userEntry = createEntry(scopes, a, b); assertEquals(userEntryB, filter.filter(userEntry)); @@ -213,14 +213,14 @@ public class TestReplicationWALEntryFilters { // 2. 
Only config table-cfs in peer // empty map userEntry = createEntry(null, a, b, c); - Map> tableCfs = new HashMap>(); + Map> tableCfs = new HashMap<>(); when(peer.getTableCFs()).thenReturn(tableCfs); filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); assertEquals(null, filter.filter(userEntry)); // table bar userEntry = createEntry(null, a, b, c); - tableCfs = new HashMap>(); + tableCfs = new HashMap<>(); tableCfs.put(TableName.valueOf("bar"), null); when(peer.getTableCFs()).thenReturn(tableCfs); filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); @@ -228,7 +228,7 @@ public class TestReplicationWALEntryFilters { // table foo:a userEntry = createEntry(null, a, b, c); - tableCfs = new HashMap>(); + tableCfs = new HashMap<>(); tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a")); when(peer.getTableCFs()).thenReturn(tableCfs); filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); @@ -236,7 +236,7 @@ public class TestReplicationWALEntryFilters { // table foo:a,c userEntry = createEntry(null, a, b, c, d); - tableCfs = new HashMap>(); + tableCfs = new HashMap<>(); tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c")); when(peer.getTableCFs()).thenReturn(tableCfs); filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); @@ -245,7 +245,7 @@ public class TestReplicationWALEntryFilters { // 3. Only config namespaces in peer when(peer.getTableCFs()).thenReturn(null); // empty set - Set namespaces = new HashSet(); + Set namespaces = new HashSet<>(); when(peer.getNamespaces()).thenReturn(namespaces); userEntry = createEntry(null, a, b, c); filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); @@ -259,7 +259,7 @@ public class TestReplicationWALEntryFilters { assertEquals(createEntry(null, a,b,c), filter.filter(userEntry)); // namespace ns1 - namespaces = new HashSet();; + namespaces = new HashSet<>(); namespaces.add("ns1"); when(peer.getNamespaces()).thenReturn(namespaces); userEntry = createEntry(null, a, b, c); @@ -268,8 +268,8 @@ public class TestReplicationWALEntryFilters { // 4. 
Config namespaces and table-cfs both // Namespaces config should not confict with table-cfs config - namespaces = new HashSet(); - tableCfs = new HashMap>(); + namespaces = new HashSet<>(); + tableCfs = new HashMap<>(); namespaces.add("ns1"); when(peer.getNamespaces()).thenReturn(namespaces); tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c")); @@ -278,8 +278,8 @@ public class TestReplicationWALEntryFilters { filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); assertEquals(createEntry(null, a, c), filter.filter(userEntry)); - namespaces = new HashSet();; - tableCfs = new HashMap>(); + namespaces = new HashSet<>(); + tableCfs = new HashMap<>(); namespaces.add("default"); when(peer.getNamespaces()).thenReturn(namespaces); tableCfs.put(TableName.valueOf("ns1:foo"), Lists.newArrayList("a", "c")); @@ -288,8 +288,8 @@ public class TestReplicationWALEntryFilters { filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer)); assertEquals(createEntry(null, a, b, c), filter.filter(userEntry)); - namespaces = new HashSet();; - tableCfs = new HashMap>(); + namespaces = new HashSet<>(); + tableCfs = new HashMap<>(); namespaces.add("ns1"); when(peer.getNamespaces()).thenReturn(namespaces); tableCfs.put(TableName.valueOf("bar"), null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java index ebf00e38c0b..4369e5e5983 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java @@ -203,7 +203,7 @@ public class TestReplicationWithTags { final WALEdit edit, final Durability durability) throws IOException { byte[] attribute = put.getAttribute("visibility"); byte[] cf = null; - List updatedCells = new ArrayList(); + List updatedCells = new ArrayList<>(); if (attribute != null) { for (List edits : put.getFamilyCellMap().values()) { for (Cell cell : edits) { @@ -212,7 +212,7 @@ public class TestReplicationWithTags { cf = CellUtil.cloneFamily(kv); } Tag tag = new ArrayBackedTag(TAG_TYPE, attribute); - List tagList = new ArrayList(1); + List tagList = new ArrayList<>(1); tagList.add(tag); KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java index dcfa7365d9e..ad5063a31f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java @@ -146,7 +146,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { public void after() throws Exception { } - static ConcurrentLinkedQueue entries = new ConcurrentLinkedQueue(); + static ConcurrentLinkedQueue entries = new ConcurrentLinkedQueue<>(); public static class WALEditCopro implements WALObserver { public WALEditCopro() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java 
index 4ae14b799bd..0e08c90fd5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java @@ -153,8 +153,8 @@ public class TestReplicationSink { */ @Test public void testBatchSink() throws Exception { - List entries = new ArrayList(BATCH_SIZE); - List cells = new ArrayList(); + List entries = new ArrayList<>(BATCH_SIZE); + List cells = new ArrayList<>(); for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } @@ -171,16 +171,16 @@ public class TestReplicationSink { */ @Test public void testMixedPutDelete() throws Exception { - List entries = new ArrayList(BATCH_SIZE/2); - List cells = new ArrayList(); + List entries = new ArrayList<>(BATCH_SIZE/2); + List cells = new ArrayList<>(); for(int i = 0; i < BATCH_SIZE/2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId, baseNamespaceDir, hfileArchiveDir); - entries = new ArrayList(BATCH_SIZE); - cells = new ArrayList(); + entries = new ArrayList<>(BATCH_SIZE); + cells = new ArrayList<>(); for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, i % 2 != 0 ? KeyValue.Type.Put: KeyValue.Type.DeleteColumn, cells)); @@ -199,8 +199,8 @@ public class TestReplicationSink { */ @Test public void testMixedPutTables() throws Exception { - List entries = new ArrayList(BATCH_SIZE/2); - List cells = new ArrayList(); + List entries = new ArrayList<>(BATCH_SIZE/2); + List cells = new ArrayList<>(); for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry( i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1, i, KeyValue.Type.Put, cells)); @@ -221,15 +221,15 @@ public class TestReplicationSink { */ @Test public void testMixedDeletes() throws Exception { - List entries = new ArrayList(3); - List cells = new ArrayList(); + List entries = new ArrayList<>(3); + List cells = new ArrayList<>(); for(int i = 0; i < 3; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir); - entries = new ArrayList(3); - cells = new ArrayList(); + entries = new ArrayList<>(3); + cells = new ArrayList<>(); entries.add(createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn, cells)); entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells)); entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells)); @@ -249,8 +249,8 @@ public class TestReplicationSink { */ @Test public void testApplyDeleteBeforePut() throws Exception { - List entries = new ArrayList(5); - List cells = new ArrayList(); + List entries = new ArrayList<>(5); + List cells = new ArrayList<>(); for(int i = 0; i < 2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } @@ -284,7 +284,7 @@ public class TestReplicationSink { } List numberList = new ArrayList<>(numbers); Collections.sort(numberList); - Map storeFilesSize = new HashMap(1); + Map storeFilesSize = new HashMap<>(1); // 2. Create 25 hfiles Configuration conf = TEST_UTIL.getConfiguration(); @@ -313,7 +313,7 @@ public class TestReplicationSink { storeFiles, storeFilesSize, 1); edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor); } - List entries = new ArrayList(1); + List entries = new ArrayList<>(1); // 4. 
Create a WALEntryBuilder WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index af3bf83b299..026f8e405d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -144,7 +144,7 @@ public abstract class TestReplicationSourceManager { protected static CountDownLatch latch; - protected static List files = new ArrayList(); + protected static List files = new ArrayList<>(); protected static NavigableMap scopes; protected static void setupZkAndReplication() throws Exception { @@ -182,8 +182,7 @@ public abstract class TestReplicationSourceManager { col.setScope(HConstants.REPLICATION_SCOPE_LOCAL); htd.addFamily(col); - scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -225,7 +224,7 @@ public abstract class TestReplicationSourceManager { WALEdit edit = new WALEdit(); edit.add(kv); - List listeners = new ArrayList(1); + List listeners = new ArrayList<>(1); listeners.add(replication); final WALFactory wals = new WALFactory(utility.getConfiguration(), listeners, URLEncoder.encode("regionserver:60020", "UTF8")); @@ -233,8 +232,7 @@ public abstract class TestReplicationSourceManager { manager.init(); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tableame")); htd.addFamily(new HColumnDescriptor(f1)); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -341,7 +339,7 @@ public abstract class TestReplicationSourceManager { server.getZooKeeper())); rq.init(server.getServerName().toString()); // populate some znodes in the peer znode - SortedSet files = new TreeSet(); + SortedSet files = new TreeSet<>(); String group = "testgroup"; String file1 = group + ".log1"; String file2 = group + ".log2"; @@ -393,7 +391,7 @@ public abstract class TestReplicationSourceManager { @Test public void testBulkLoadWALEditsWithoutBulkLoadReplicationEnabled() throws Exception { - NavigableMap scope = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scope = new TreeMap<>(Bytes.BYTES_COMPARATOR); // 1. Get the bulk load wal edit event WALEdit logEdit = getBulkLoadWALEdit(scope); // 2. Create wal key @@ -410,7 +408,7 @@ public abstract class TestReplicationSourceManager { @Test public void testBulkLoadWALEdits() throws Exception { // 1. Get the bulk load wal edit event - NavigableMap scope = new TreeMap(Bytes.BYTES_COMPARATOR); + NavigableMap scope = new TreeMap<>(Bytes.BYTES_COMPARATOR); WALEdit logEdit = getBulkLoadWALEdit(scope); // 2. 
Create wal key WALKey logKey = new WALKey(scope); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java index 3739d754537..adf09d42f50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java @@ -127,10 +127,8 @@ class HbaseObjectWritableFor96Migration implements Writable, WritableWithSize, C // Here we maintain two static maps of classes to code and vice versa. // Add new classes+codes as wanted or figure way to auto-generate these // maps. - static final Map> CODE_TO_CLASS = - new HashMap>(); - static final Map, Integer> CLASS_TO_CODE = - new HashMap, Integer>(); + static final Map> CODE_TO_CLASS = new HashMap<>(); + static final Map, Integer> CLASS_TO_CODE = new HashMap<>(); // Special code that means 'not-encoded'; in this case we do old school // sending of the class name using reflection, etc. private static final byte NOT_ENCODED = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java index cf01463d8f4..06389ab7ee9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java @@ -112,7 +112,7 @@ public class TestAccessControlFilter extends SecureTestUtil { Permission.Action.READ); // put some test data - List puts = new ArrayList(100); + List puts = new ArrayList<>(100); for (int i=0; i<100; i++) { Put p = new Put(Bytes.toBytes(i)); p.addColumn(FAMILY, PRIVATE_COL, Bytes.toBytes("secret " + i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 037719022ec..8bf2c5ce34d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -640,7 +640,7 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction listProceduresAction = new AccessTestAction() { @Override public Object run() throws Exception { - List procInfoListClone = new ArrayList(procInfoList.size()); + List procInfoListClone = new ArrayList<>(procInfoList.size()); for(ProcedureInfo pi : procInfoList) { procInfoListClone.add(pi.clone()); } @@ -1763,7 +1763,7 @@ public class TestAccessController extends SecureTestUtil { } List superUsers = Superusers.getSuperUsers(); - List adminPerms = new ArrayList(superUsers.size() + 1); + List adminPerms = new ArrayList<>(superUsers.size() + 1); adminPerms.add(new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()), AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index bbc6ad0ef4f..88cdf1d07e5 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -248,7 +248,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { } private Map prepareCellPermissions(String[] users, Action... action) { - Map perms = new HashMap(2); + Map perms = new HashMap<>(2); for (String user : users) { perms.put(user, new Permission(action)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java index 29bbbbbae48..102b28ae2c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java @@ -447,7 +447,7 @@ public class TestCellACLs extends SecureTestUtil { } private Map prepareCellPermissions(String[] users, Action... action) { - Map perms = new HashMap(2); + Map perms = new HashMap<>(2); for (String user : users) { perms.put(user, new Permission(action)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index 7c60f686ab4..08c81074a98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -931,7 +931,7 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preBatchMutate(ObserverContext.createAndPrepare(RCP_ENV, null), - new MiniBatchOperationInProgress(null, null, null, 0, 0)); + new MiniBatchOperationInProgress<>(null, null, null, 0, 0)); return null; } }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java index 6582751decf..cb36246e73a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java @@ -111,7 +111,7 @@ public class TestZKPermissionsWatcher { TablePermission.Action.WRITE)); // update ACL: george RW - List acl = new ArrayList(1); + List acl = new ArrayList<>(1); acl.add(new TablePermission(TEST_TABLE, null, TablePermission.Action.READ, TablePermission.Action.WRITE)); final long mtimeB = AUTH_B.getMTime(); @@ -144,7 +144,7 @@ public class TestZKPermissionsWatcher { TablePermission.Action.WRITE)); // update ACL: hubert R - acl = new ArrayList(1); + acl = new ArrayList<>(1); acl.add(new TablePermission(TEST_TABLE, null, TablePermission.Action.READ)); final long mtimeA = AUTH_A.getMTime(); AUTH_B.setTableUserPermissions("hubert", TEST_TABLE, acl); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 5b46af597ce..0324359ea6b 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -137,7 +137,7 @@ public class TestTokenAuthentication { if (initialIsa.getAddress() == null) { throw new IllegalArgumentException("Failed resolve of " + initialIsa); } - final List sai = new ArrayList(1); + final List sai = new ArrayList<>(1); // Make a proxy to go between the shaded Service that rpc expects and the // non-shaded Service this CPEP is providing. This is because this test does a neat // little trick of testing the CPEP Service by inserting it as RpcServer Service. This @@ -351,8 +351,7 @@ public class TestTokenAuthentication { // Ignore above passed in controller -- it is always null ServerRpcController serverController = new ServerRpcController(); final NonShadedBlockingRpcCallback - callback = - new NonShadedBlockingRpcCallback(); + callback = new NonShadedBlockingRpcCallback<>(); getAuthenticationToken((RpcController)null, request, callback); try { serverController.checkFailed(); @@ -370,7 +369,7 @@ public class TestTokenAuthentication { // Ignore above passed in controller -- it is always null ServerRpcController serverController = new ServerRpcController(); NonShadedBlockingRpcCallback callback = - new NonShadedBlockingRpcCallback(); + new NonShadedBlockingRpcCallback<>(); whoAmI(null, request, callback); try { serverController.checkFailed(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java index 8cef21e7b34..d8d6f1ec5ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java @@ -152,7 +152,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer @Override public List getUserAuths(byte[] user, boolean systemCall) throws IOException { assert (labelsRegion != null || systemCall); - List auths = new ArrayList(); + List auths = new ArrayList<>(); Get get = new Get(user); List cells = null; if (labelsRegion == null) { @@ -187,7 +187,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer @Override public List getGroupAuths(String[] groups, boolean systemCall) throws IOException { assert (labelsRegion != null || systemCall); - List auths = new ArrayList(); + List auths = new ArrayList<>(); if (groups != null && groups.length > 0) { for (String group : groups) { Get get = new Get(Bytes.toBytes(AuthUtil.toGroupEntry(group))); @@ -224,7 +224,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer @Override public List listLabels(String regex) throws IOException { // return an empty list for this implementation. 
- return new ArrayList(); + return new ArrayList<>(); } @Override @@ -237,7 +237,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer throw new IOException(e); } node = this.expressionExpander.expand(node); - List tags = new ArrayList(); + List tags = new ArrayList<>(); if (withSerializationFormat) { tags.add(STRING_SERIALIZATION_FORMAT_TAG); } @@ -270,7 +270,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer try { // null authorizations to be handled inside SLG impl. authLabels = scanLabelGenerator.getLabels(VisibilityUtils.getActiveUser(), authorizations); - authLabels = (authLabels == null) ? new ArrayList() : authLabels; + authLabels = (authLabels == null) ? new ArrayList<>() : authLabels; authorizations = new Authorizations(authLabels); } catch (Throwable t) { LOG.error(t); @@ -334,8 +334,8 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer private Tag createTag(ExpressionNode node) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); - List labels = new ArrayList(); - List notLabels = new ArrayList(); + List labels = new ArrayList<>(); + List notLabels = new ArrayList<>(); extractLabels(node, labels, notLabels); Collections.sort(labels); Collections.sort(notLabels); @@ -402,7 +402,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer if (Superusers.isSuperUser(user)) { return true; } - Set auths = new HashSet(); + Set auths = new HashSet<>(); auths.addAll(this.getUserAuths(Bytes.toBytes(user.getShortName()), true)); auths.addAll(this.getGroupAuths(user.getGroupNames(), true)); return auths.contains(SYSTEM_LABEL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java index df77400fefc..ed90274b155 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/LabelFilteringScanLabelGenerator.java @@ -44,7 +44,7 @@ public class LabelFilteringScanLabelGenerator implements ScanLabelGenerator { public List getLabels(User user, Authorizations authorizations) { if (authorizations != null) { if (labelToFilter == null) return authorizations.getLabels(); - List newAuths = new ArrayList(); + List newAuths = new ArrayList<>(); for (String auth : authorizations.getLabels()) { if (!labelToFilter.equals(auth)) newAuths.add(auth); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java index 2111229024d..83486791a53 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java @@ -438,7 +438,7 @@ public abstract class TestVisibilityLabels { scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); ResultScanner scanner = ht.getScanner(scan); Result result = null; - List results = new ArrayList(); + List results = new ArrayList<>(); while ((result = scanner.next()) != null) { results.add(result); } @@ -456,7 +456,7 @@ public abstract class TestVisibilityLabels { } catch 
(Throwable e) { fail("Should not have failed"); } - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } @@ -482,7 +482,7 @@ public abstract class TestVisibilityLabels { } } catch (Throwable e) { } - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } @@ -496,7 +496,7 @@ public abstract class TestVisibilityLabels { } protected List extractAuths(String user, List results) { - List auths = new ArrayList(); + List auths = new ArrayList<>(); for (Result result : results) { Cell labelCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); Cell userAuthCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, user.getBytes()); @@ -542,7 +542,7 @@ public abstract class TestVisibilityLabels { Table ht = connection.getTable(LABELS_TABLE_NAME)) { ResultScanner scanner = ht.getScanner(new Scan()); Result result = null; - List results = new ArrayList(); + List results = new ArrayList<>(); while ((result = scanner.next()) != null) { results.add(result); } @@ -557,7 +557,7 @@ public abstract class TestVisibilityLabels { } catch (Throwable e) { fail("Should not have failed"); } - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } @@ -853,7 +853,7 @@ public abstract class TestVisibilityLabels { static Table createTableAndWriteDataWithLabels(TableName tableName, String... 
labelExps) throws Exception { - List puts = new ArrayList(labelExps.length); + List puts = new ArrayList<>(labelExps.length); for (int i = 0; i < labelExps.length; i++) { Put put = new Put(Bytes.toBytes("row" + (i+1))); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java index 307bd00f4ba..a3c926e8c0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java @@ -128,7 +128,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL { authsResponse = NORMAL_USER1.runAs(action1); assertTrue(authsResponse.getAuthList().isEmpty()); authsResponse = SUPERUSER.runAs(action1); - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index a10e3a91872..d79e30d271e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -378,7 +378,7 @@ public class TestVisibilityLabelsReplication { static Table writeData(TableName tableName, String... 
labelExps) throws Exception { Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME); int i = 1; - List puts = new ArrayList(labelExps.length); + List puts = new ArrayList<>(labelExps.length); for (String labelExp : labelExps) { Put put = new Put(Bytes.toBytes("row" + i)); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value); @@ -399,7 +399,7 @@ public class TestVisibilityLabelsReplication { Durability durability) throws IOException { byte[] attribute = m.getAttribute(NON_VISIBILITY); byte[] cf = null; - List updatedCells = new ArrayList(); + List updatedCells = new ArrayList<>(); if (attribute != null) { for (List edits : m.getFamilyCellMap().values()) { for (Cell cell : edits) { @@ -408,7 +408,7 @@ public class TestVisibilityLabelsReplication { cf = CellUtil.cloneFamily(kv); } Tag tag = new ArrayBackedTag((byte) NON_VIS_TAG_TYPE, attribute); - List tagList = new ArrayList(kv.getTags().size() + 1); + List tagList = new ArrayList<>(kv.getTags().size() + 1); tagList.add(tag); tagList.addAll(kv.getTags()); Cell newcell = CellUtil.createCell(kv, tagList); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java index e236be2831c..f6ff640c48e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java @@ -300,7 +300,7 @@ public class TestVisibilityLabelsWithACL { GetAuthsResponse authsResponse = NORMAL_USER1.runAs(action1); assertNull(authsResponse); authsResponse = SUPERUSER.runAs(action1); - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } @@ -315,7 +315,7 @@ public class TestVisibilityLabelsWithACL { try { table = TEST_UTIL.createTable(tableName, fam); int i = 1; - List puts = new ArrayList(labelExps.length); + List puts = new ArrayList<>(labelExps.length); for (String labelExp : labelExps) { Put put = new Put(Bytes.toBytes("row" + i)); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java index 5cc72d2d855..7b5a5b75267 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java @@ -66,7 +66,7 @@ public class TestVisibilityLabelsWithCustomVisLabService extends TestVisibilityL } protected List extractAuths(String user, List results) { - List auths = new ArrayList(); + List auths = new ArrayList<>(); for (Result result : results) { if (Bytes.equals(result.getRow(), Bytes.toBytes(user))) { NavigableMap familyMap = result.getFamilyMap(LABELS_TABLE_FAMILY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java index 
9853fa2bd40..9e244abb240 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java @@ -1146,7 +1146,7 @@ public class TestVisibilityLabelsWithDeletes { desc.addFamily(colDesc); hBaseAdmin.createTable(desc); - List puts = new ArrayList(5); + List puts = new ArrayList<>(5); Put put = new Put(Bytes.toBytes("row1")); put.addColumn(fam, qual, 123l, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); @@ -3225,7 +3225,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = null; table = TEST_UTIL.createTable(tableName, fam); int i = 1; - List puts = new ArrayList(labelExps.length); + List puts = new ArrayList<>(labelExps.length); for (String labelExp : labelExps) { Put put = new Put(Bytes.toBytes("row" + i)); put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value); @@ -3243,7 +3243,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = null; table = TEST_UTIL.createTable(tableName, fam); int i = 1; - List puts = new ArrayList(labelExps.length); + List puts = new ArrayList<>(labelExps.length); for (String labelExp : labelExps) { Put put = new Put(Bytes.toBytes("row" + i)); put.addColumn(fam, qual, timestamp[i - 1], value); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java index 940d6dc7a7d..9f24f6c8c95 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java @@ -185,7 +185,7 @@ public class TestVisibilityLablesWithGroups { } catch (Throwable e) { fail("Should not have failed"); } - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } @@ -302,7 +302,7 @@ public class TestVisibilityLablesWithGroups { } catch (Throwable e) { fail("Should not have failed"); } - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java index ff348db09b7..3d53a1e6371 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java @@ -142,7 +142,7 @@ public class TestWithDisabledAuthorization { } catch (Throwable t) { fail("Should not have failed"); } - List authsList = new ArrayList(authsResponse.getAuthList().size()); + List authsList = new ArrayList<>(authsResponse.getAuthList().size()); for (ByteString authBS : authsResponse.getAuthList()) { authsList.add(Bytes.toString(authBS.toByteArray())); } @@ -227,7 +227,7 @@ public class TestWithDisabledAuthorization { static Table 
createTableAndWriteDataWithLabels(TableName tableName, String... labelExps) throws Exception { - List puts = new ArrayList(labelExps.length + 1); + List puts = new ArrayList<>(labelExps.length + 1); for (int i = 0; i < labelExps.length; i++) { Put put = new Put(Bytes.toBytes("row" + (i+1))); put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index 74f49747bc6..ccad85b6e02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -112,7 +112,7 @@ public final class SnapshotTestingUtils { // list the snapshot List snapshots = admin.listSnapshots(); - List returnedSnapshots = new ArrayList(); + List returnedSnapshots = new ArrayList<>(); for (SnapshotDescription sd : snapshots) { if (snapshotName.equals(sd.getName()) && tableName.equals(sd.getTableName())) { returnedSnapshots.add(sd); @@ -213,7 +213,7 @@ public final class SnapshotTestingUtils { HBaseProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); // Extract regions and families with store files - final Set snapshotFamilies = new TreeSet(Bytes.BYTES_COMPARATOR); + final Set snapshotFamilies = new TreeSet<>(Bytes.BYTES_COMPARATOR); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc); Map regionManifests = manifest.getRegionManifestsMap(); @@ -355,7 +355,7 @@ public final class SnapshotTestingUtils { */ public static ArrayList listHFileNames(final FileSystem fs, final Path tableDir) throws IOException { - final ArrayList hfiles = new ArrayList(); + final ArrayList hfiles = new ArrayList<>(); FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() { @Override public void storeFile(final String region, final String family, final String hfileName) @@ -376,7 +376,7 @@ public final class SnapshotTestingUtils { TableName tableName, String familyName, String snapshotNameString, Path rootDir, FileSystem fs, boolean onlineSnapshot) throws Exception { - ArrayList nonEmptyFamilyNames = new ArrayList(1); + ArrayList nonEmptyFamilyNames = new ArrayList<>(1); nonEmptyFamilyNames.add(Bytes.toBytes(familyName)); createSnapshotAndValidate(admin, tableName, nonEmptyFamilyNames, /* emptyFamilyNames= */ null, snapshotNameString, rootDir, fs, onlineSnapshot); @@ -869,7 +869,7 @@ public final class SnapshotTestingUtils { public static void verifyReplicasCameOnline(TableName tableName, Admin admin, int regionReplication) throws IOException { List regions = admin.getTableRegions(tableName); - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); for (HRegionInfo hri : regions) { set.add(RegionReplicaUtil.getRegionInfoForDefaultReplica(hri)); for (int i = 0; i < regionReplication; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 47601ca335f..1beb5184590 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -205,7 +205,7 @@ public class TestExportSnapshot { FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration()); copyDir = 
copyDir.makeQualified(fs); - List opts = new ArrayList(); + List opts = new ArrayList<>(); opts.add("--snapshot"); opts.add(Bytes.toString(snapshotName)); opts.add("--copy-to"); @@ -302,7 +302,7 @@ public class TestExportSnapshot { final RegionPredicate bypassregionPredicate) throws IOException { final Path exportedSnapshot = new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); - final Set snapshotFiles = new HashSet(); + final Set snapshotFiles = new HashSet<>(); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot, new SnapshotReferenceUtil.SnapshotVisitor() { @@ -338,7 +338,7 @@ public class TestExportSnapshot { private static Set listFiles(final FileSystem fs, final Path root, final Path dir) throws IOException { - Set files = new HashSet(); + Set files = new HashSet<>(); int rootPrefix = root.makeQualified(fs).toString().length(); FileStatus[] list = FSUtils.listStatus(fs, dir); if (list != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java index 77cfbcc2f08..e31e81ebf63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java @@ -47,13 +47,13 @@ public class TestExportSnapshotHelpers { @Test public void testBalanceSplit() throws Exception { // Create a list of files - List> files = new ArrayList>(21); + List> files = new ArrayList<>(21); for (long i = 0; i <= 20; i++) { SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() .setType(SnapshotFileInfo.Type.HFILE) .setHfile("file-" + i) .build(); - files.add(new Pair(fileInfo, i)); + files.add(new Pair<>(fileInfo, i)); } // Create 5 groups (total size 210) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index deb332081c3..86405dcf3c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -224,7 +224,7 @@ public class TestFlushSnapshotFromClient { // take a snapshot of the enabled table String snapshotString = "offlineTableSnapshot"; byte[] snapshot = Bytes.toBytes(snapshotString); - Map props = new HashMap(); + Map props = new HashMap<>(); props.put("table", TABLE_NAME.getNameAsString()); admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION, snapshotString, props); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index b7fb9f7568b..02bd49bf4b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -259,7 +259,7 @@ public class BaseTestHBaseFsck { createTable(TEST_UTIL, desc, SPLITS); tbl = connection.getTable(tablename, tableExecutorService); - List puts = new ArrayList(ROWKEYS.length); + List puts = new ArrayList<>(ROWKEYS.length); for (byte[] row : ROWKEYS) { Put p = new Put(row); p.addColumn(FAM, Bytes.toBytes("val"), row); @@ -283,7 +283,7 @@ public 
class BaseTestHBaseFsck { createTable(TEST_UTIL, desc, SPLITS); tbl = connection.getTable(tablename, tableExecutorService); - List puts = new ArrayList(ROWKEYS.length); + List puts = new ArrayList<>(ROWKEYS.length); for (byte[] row : ROWKEYS) { Put p = new Put(row); p.addColumn(FAM, Bytes.toBytes("val"), row); @@ -328,8 +328,7 @@ public class BaseTestHBaseFsck { Map> getDeployedHRIs(final Admin admin) throws IOException { ClusterStatus status = admin.getClusterStatus(); Collection regionServers = status.getServers(); - Map> mm = - new HashMap>(); + Map> mm = new HashMap<>(); for (ServerName hsi : regionServers) { AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi); @@ -525,7 +524,7 @@ public class BaseTestHBaseFsck { @Override public ArrayList getErrorList() { calledCount++; - return new ArrayList(); + return new ArrayList<>(); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java index 73ce71adc65..ddf26a0623d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java @@ -57,7 +57,7 @@ public class ConstantDelayQueue implements BlockingQueue { private final long delayMs; // backing DelayQueue - private DelayQueue> queue = new DelayQueue>(); + private DelayQueue> queue = new DelayQueue<>(); public ConstantDelayQueue(TimeUnit timeUnit, long delay) { this.delayMs = TimeUnit.MILLISECONDS.convert(delay, timeUnit); @@ -139,22 +139,22 @@ public class ConstantDelayQueue implements BlockingQueue { @Override public boolean add(E e) { - return queue.add(new DelayedElement(e, delayMs)); + return queue.add(new DelayedElement<>(e, delayMs)); } @Override public boolean offer(E e) { - return queue.offer(new DelayedElement(e, delayMs)); + return queue.offer(new DelayedElement<>(e, delayMs)); } @Override public void put(E e) throws InterruptedException { - queue.put(new DelayedElement(e, delayMs)); + queue.put(new DelayedElement<>(e, delayMs)); } @Override public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { - return queue.offer(new DelayedElement(e, delayMs), timeout, unit); + return queue.offer(new DelayedElement<>(e, delayMs), timeout, unit); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java index d68c5783115..bf95a9e7123 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java @@ -143,14 +143,14 @@ public class HFileArchiveTestingUtil { * @return , where each is sorted */ private static List> getFileLists(FileStatus[] previous, FileStatus[] archived) { - List> files = new ArrayList>(3); + List> files = new ArrayList<>(3); // copy over the original files List originalFileNames = convertToString(previous); files.add(originalFileNames); - List currentFiles = new ArrayList(previous.length); - List backedupFiles = new ArrayList(previous.length); + List currentFiles = new ArrayList<>(previous.length); + List backedupFiles = new ArrayList<>(previous.length); for (FileStatus f : archived) { String name = f.getPath().getName(); // if the file has been backed up @@ -177,7 +177,7 @@ public class HFileArchiveTestingUtil { } private 
static List convertToString(List files) { - List originalFileNames = new ArrayList(files.size()); + List originalFileNames = new ArrayList<>(files.size()); for (FileStatus f : files) { originalFileNames.add(f.getPath().getName()); } @@ -188,7 +188,7 @@ public class HFileArchiveTestingUtil { private static String compareFileLists(List expected, List gotten) { StringBuilder sb = new StringBuilder("Expected (" + expected.size() + "): \t\t Gotten (" + gotten.size() + "):\n"); - List notFound = new ArrayList(); + List notFound = new ArrayList<>(); for (String s : expected) { if (gotten.contains(s)) sb.append(s + "\t\t" + s + "\n"); else notFound.add(s); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java index 87cb070da81..2ea01bb5024 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java @@ -64,7 +64,7 @@ public class LoadTestDataGeneratorWithTags extends DefaultDataGenerator { @Override public Mutation beforeMutate(long rowkeyBase, Mutation m) throws IOException { if (m instanceof Put) { - List updatedCells = new ArrayList(); + List updatedCells = new ArrayList<>(); int numTags; if (minNumTags == maxNumTags) { numTags = minNumTags; @@ -76,7 +76,7 @@ public class LoadTestDataGeneratorWithTags extends DefaultDataGenerator { Cell cell = cellScanner.current(); byte[] tag = LoadTestTool.generateData(random, minTagLength + random.nextInt(maxTagLength - minTagLength)); - tags = new ArrayList(); + tags = new ArrayList<>(); for (int n = 0; n < numTags; n++) { tags.add(new ArrayBackedTag((byte) 127, tag)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 9d6269385b7..9a5e6f1cf24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -864,7 +864,7 @@ public class LoadTestTool extends AbstractHBaseTool { } // starting to load multiple tables - List workers = new ArrayList(); + List workers = new ArrayList<>(); for (int i = 0; i < numTables; i++) { String[] workerArgs = newArgs.clone(); workerArgs[tableNameValueIndex] = tableName + "_" + (i+1); @@ -892,7 +892,7 @@ public class LoadTestTool extends AbstractHBaseTool { // If an exception is thrown by one of worker threads, it will be // stored here. 
- protected AtomicReference thrown = new AtomicReference(); + protected AtomicReference thrown = new AtomicReference<>(); private void workerThreadError(Throwable t) { thrown.compareAndSet(null, t); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index db42659df16..8da92b2c782 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -533,7 +533,7 @@ public abstract class MultiThreadedAction { // Parse mutate info into a map of => private Map parseMutateInfo(byte[] mutateInfo) { - Map mi = new HashMap(); + Map mi = new HashMap<>(); if (mutateInfo != null) { String mutateInfoStr = Bytes.toString(mutateInfo); String[] mutations = mutateInfoStr.split("#"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java index 77443e12b29..e6de33d6d30 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java @@ -41,7 +41,7 @@ public class MultiThreadedReader extends MultiThreadedAction { private static final Log LOG = LogFactory.getLog(MultiThreadedReader.class); - protected Set readers = new HashSet(); + protected Set readers = new HashSet<>(); private final double verifyPercent; protected volatile boolean aborted; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java index cdf814cc254..1e7e341b5c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java @@ -43,8 +43,8 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader { * Maps user with Table instance. 
Because the table instance has to be created * per user inorder to work in that user's context */ - private Map userVsTable = new HashMap(); - private Map users = new HashMap(); + private Map userVsTable = new HashMap<>(); + private Map users = new HashMap<>(); private String[] userNames; public MultiThreadedReaderWithACL(LoadTestDataGenerator dataGen, Configuration conf, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java index dbcfddb3d21..1505fc1db27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java @@ -57,7 +57,7 @@ import com.google.common.base.Preconditions; public class MultiThreadedUpdater extends MultiThreadedWriterBase { private static final Log LOG = LogFactory.getLog(MultiThreadedUpdater.class); - protected Set updaters = new HashSet(); + protected Set updaters = new HashSet<>(); private MultiThreadedWriterBase writer = null; private boolean isBatchUpdate = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java index bf27dde3b21..40e23fb111f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java @@ -53,8 +53,8 @@ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater { * Maps user with Table instance. Because the table instance has to be created * per user inorder to work in that user's context */ - private Map userVsTable = new HashMap(); - private Map users = new HashMap(); + private Map userVsTable = new HashMap<>(); + private Map users = new HashMap<>(); private String[] userNames; public MultiThreadedUpdaterWithACL(LoadTestDataGenerator dataGen, Configuration conf, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java index d53ab250a94..d62f72d53ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.StringUtils; public class MultiThreadedWriter extends MultiThreadedWriterBase { private static final Log LOG = LogFactory.getLog(MultiThreadedWriter.class); - protected Set writers = new HashSet(); + protected Set writers = new HashSet<>(); protected boolean isMultiPut = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java index 1bbd41076ec..fbf745ffb04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java @@ -60,7 +60,7 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { protected AtomicLong wroteUpToKey = new AtomicLong(); /** The sorted set of keys NOT inserted/updated by the writers */ - protected Set failedKeySet = new ConcurrentSkipListSet(); + protected Set failedKeySet = new ConcurrentSkipListSet<>(); /** * The total size of the 
temporary inserted/updated key set that have not yet lined @@ -79,7 +79,7 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { } protected BlockingQueue createWriteKeysQueue(Configuration conf) { - return new ArrayBlockingQueue(10000); + return new ArrayBlockingQueue<>(10000); } @Override @@ -129,7 +129,7 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { Thread.currentThread().setName(getClass().getSimpleName()); try { long expectedKey = startKey; - Queue sortedKeys = new PriorityQueue(); + Queue sortedKeys = new PriorityQueue<>(); while (expectedKey < endKey) { // Block until a new element is available. Long k; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java index 6d992a5fe7c..a5cf0bd4a65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java @@ -140,7 +140,7 @@ public class ProcessBasedLocalHBaseCluster { * in the returned array, e.g. server #0, #1, etc. */ private static List sortedPorts(int n) { - List ports = new ArrayList(n); + List ports = new ArrayList<>(n); for (int i = 0; i < n; ++i) { ports.add(HBaseTestingUtility.randomFreePort()); } @@ -216,8 +216,7 @@ public class ProcessBasedLocalHBaseCluster { try { String [] envp = null; if (envOverrides != null) { - Map map = new HashMap( - System.getenv()); + Map map = new HashMap<>(System.getenv()); map.putAll(envOverrides); envp = new String[map.size()]; int idx = 0; @@ -250,7 +249,7 @@ public class ProcessBasedLocalHBaseCluster { private void shutdownAllProcesses() { LOG.info("Killing daemons using pid files"); - final List pidFiles = new ArrayList(daemonPidFiles); + final List pidFiles = new ArrayList<>(daemonPidFiles); for (String pidFile : pidFiles) { int pid = 0; try { @@ -359,7 +358,7 @@ public class ProcessBasedLocalHBaseCluster { "HBASE_ZOOKEEPER_JMX_OPTS=' '\n", dir + "/hbase-env.sh"); - Map envOverrides = new HashMap(); + Map envOverrides = new HashMap<>(); envOverrides.put("HBASE_LOG_DIR", dir); envOverrides.put("HBASE_PID_DIR", dir); try { @@ -379,7 +378,7 @@ public class ProcessBasedLocalHBaseCluster { private final String generateConfig(ServerType serverType, int rpcPort, String daemonDir) { StringBuilder sb = new StringBuilder(); - Map confMap = new TreeMap(); + Map confMap = new TreeMap<>(); confMap.put(HConstants.CLUSTER_DISTRIBUTED, true); if (serverType == ServerType.MASTER) { @@ -446,8 +445,8 @@ public class ProcessBasedLocalHBaseCluster { } private final class LocalDaemonLogTailer implements Runnable { - private final Set tailedFiles = new HashSet(); - private final List dirList = new ArrayList(); + private final Set tailedFiles = new HashSet<>(); + private final List dirList = new ArrayList<>(); private final Object printLock = new Object(); private final FilenameFilter LOG_FILES = new FilenameFilter() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java index 34c4ec01fd6..7112d507994 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java @@ -73,7 +73,7 @@ public class 
TestBoundedPriorityBlockingQueue { @Before public void setUp() throws Exception { - this.queue = new BoundedPriorityBlockingQueue(CAPACITY, new TestObjectComparator()); + this.queue = new BoundedPriorityBlockingQueue<>(CAPACITY, new TestObjectComparator()); } @After @@ -186,7 +186,7 @@ public class TestBoundedPriorityBlockingQueue { @Test public void testPoll() { assertNull(queue.poll()); - PriorityQueue testList = new PriorityQueue(CAPACITY, new TestObjectComparator()); + PriorityQueue testList = new PriorityQueue<>(CAPACITY, new TestObjectComparator()); for (int i = 0; i < CAPACITY; ++i) { TestObject obj = new TestObject(i, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index 304717a263c..caf8de92ffe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -210,10 +210,8 @@ public class TestCoprocessorScanPolicy { } public static class ScanObserver implements RegionObserver { - private Map ttls = - new HashMap(); - private Map versions = - new HashMap(); + private Map ttls = new HashMap<>(); + private Map versions = new HashMap<>(); // lame way to communicate with the coprocessor, // since it is loaded by a different class loader @@ -301,4 +299,4 @@ public class TestCoprocessorScanPolicy { } } } -} \ No newline at end of file +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java index 4a870f8ea54..e455b0ad270 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java @@ -59,9 +59,9 @@ public class TestFSVisitor { fs = FileSystem.get(TEST_UTIL.getConfiguration()); rootDir = TEST_UTIL.getDataTestDir("hbase"); - tableFamilies = new HashSet(); - tableRegions = new HashSet(); - tableHFiles = new HashSet(); + tableFamilies = new HashSet<>(); + tableRegions = new HashSet<>(); + tableHFiles = new HashSet<>(); tableDir = createTableFiles(rootDir, TABLE_NAME, tableRegions, tableFamilies, tableHFiles); FSUtils.logFileSystemState(fs, rootDir, LOG); } @@ -73,9 +73,9 @@ public class TestFSVisitor { @Test public void testVisitStoreFiles() throws IOException { - final Set regions = new HashSet(); - final Set families = new HashSet(); - final Set hfiles = new HashSet(); + final Set regions = new HashSet<>(); + final Set families = new HashSet<>(); + final Set hfiles = new HashSet<>(); FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() { public void storeFile(final String region, final String family, final String hfileName) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java index 501bfc43cf3..54f310d8643 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java @@ -139,7 +139,7 @@ public class TestHBaseFsckEncryption { } private List findStorefilePaths(TableName tableName) throws Exception { - List paths = new ArrayList(); + List paths = new ArrayList<>(); for (Region region: 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) { for (Store store: region.getStores()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index b04689cef51..b6a185b6f07 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -62,7 +62,7 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck { TEST_UTIL.startMiniCluster(1); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, - new SynchronousQueue(), Threads.newDaemonThreadFactory("testhbck")); + new SynchronousQueue<>(), Threads.newDaemonThreadFactory("testhbck")); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index 0e3355ae51b..1d09dfac875 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -112,7 +112,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { TEST_UTIL.startMiniCluster(1); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, - new SynchronousQueue(), Threads.newDaemonThreadFactory("testhbck")); + new SynchronousQueue<>(), Threads.newDaemonThreadFactory("testhbck")); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); @@ -1402,7 +1402,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { setupTable(tableName); // Mess it up by removing the RegionInfo for one region. 
- final List deletes = new LinkedList(); + final List deletes = new LinkedList<>(); Table meta = connection.getTable(TableName.META_TABLE_NAME, hbfsckExecutorService); MetaTableAccessor.fullScanRegions(connection, new MetaTableAccessor.Visitor() { @@ -1630,7 +1630,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { am.regionOffline(state.getRegion()); } - Map regionsMap = new HashMap(); + Map regionsMap = new HashMap<>(); regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName()); am.assign(regionsMap); am.waitForAssignment(regions.get(0).getRegionInfo()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java index 9b92a69da47..7956d40c22c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java @@ -74,7 +74,7 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck { TEST_UTIL.startMiniCluster(3); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, - new SynchronousQueue(), Threads.newDaemonThreadFactory("testhbck")); + new SynchronousQueue<>(), Threads.newDaemonThreadFactory("testhbck")); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); @@ -255,7 +255,7 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck { } // get all the online regions in the regionservers Collection servers = admin.getClusterStatus().getServers(); - Set onlineRegions = new HashSet(); + Set onlineRegions = new HashSet<>(); for (ServerName s : servers) { List list = admin.getOnlineRegions(s); onlineRegions.addAll(list); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java index 99a41f59aed..91a71c7fd03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java @@ -86,7 +86,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { TEST_UTIL.startMiniCluster(2); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, - new SynchronousQueue(), Threads.newDaemonThreadFactory("testhbck")); + new SynchronousQueue<>(), Threads.newDaemonThreadFactory("testhbck")); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java index fbfbb472a50..c3f934dee65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java @@ -50,7 +50,7 @@ public class TestIdLock { private IdLock idLock = new IdLock(); - private Map idOwner = new ConcurrentHashMap(); + private Map idOwner = new ConcurrentHashMap<>(); private class IdLockTestThread implements Callable { @@ -95,8 +95,7 @@ public class TestIdLock { public void testMultipleClients() throws Exception { ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS); try { - ExecutorCompletionService ecs = - new ExecutorCompletionService(exec); + ExecutorCompletionService ecs = new ExecutorCompletionService<>(exec); for (int i = 0; i < NUM_THREADS; ++i) ecs.submit(new IdLockTestThread("client_" + i)); for (int 
i = 0; i < NUM_THREADS; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java index 66f6d4b1e6d..2ccfad8f2f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdReadWriteLock.java @@ -53,7 +53,7 @@ public class TestIdReadWriteLock { private IdReadWriteLock idLock = new IdReadWriteLock(); - private Map idOwner = new ConcurrentHashMap(); + private Map idOwner = new ConcurrentHashMap<>(); private class IdLockTestThread implements Callable { @@ -104,8 +104,7 @@ public class TestIdReadWriteLock { public void testMultipleClients() throws Exception { ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS); try { - ExecutorCompletionService ecs = - new ExecutorCompletionService(exec); + ExecutorCompletionService ecs = new ExecutorCompletionService<>(exec); for (int i = 0; i < NUM_THREADS; ++i) ecs.submit(new IdLockTestThread("client_" + i)); for (int i = 0; i < NUM_THREADS; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java index 0cf46098c03..865cd110054 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadEncoded.java @@ -40,7 +40,7 @@ public class TestMiniClusterLoadEncoded extends TestMiniClusterLoadParallel { @Parameters public static Collection parameters() { - List parameters = new ArrayList(); + List parameters = new ArrayList<>(); for (DataBlockEncoding dataBlockEncoding : DataBlockEncoding.values() ) { parameters.add(new Object[]{dataBlockEncoding}); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java index 726a450d857..f7652217e35 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java @@ -88,7 +88,7 @@ public class TestMiniClusterLoadSequential { @Parameters public static Collection parameters() { - List parameters = new ArrayList(); + List parameters = new ArrayList<>(); for (boolean multiPut : new boolean[]{false, true}) { for (DataBlockEncoding dataBlockEncoding : new DataBlockEncoding[] { DataBlockEncoding.NONE, DataBlockEncoding.PREFIX }) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java index b229e910e20..7fc09d2dfc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java @@ -46,7 +46,7 @@ public class TestPoolMap { @Override protected void setUp() throws Exception { - this.poolMap = new PoolMap(getPoolType(), POOL_SIZE); + this.poolMap = new PoolMap<>(getPoolType(), POOL_SIZE); } protected abstract PoolType getPoolType(); @@ -117,7 +117,7 @@ public class TestPoolMap { public void testPoolCap() throws InterruptedException, ExecutionException { String randomKey = String.valueOf(random.nextInt()); - List randomValues = new ArrayList(); + List randomValues = new 
ArrayList<>(); for (int i = 0; i < POOL_SIZE * 2; i++) { String randomValue = String.valueOf(random.nextInt()); randomValues.add(randomValue); @@ -219,7 +219,7 @@ public class TestPoolMap { public void testPoolCap() throws InterruptedException, ExecutionException { // As long as we poll values we put, the pool size should remain zero String randomKey = String.valueOf(random.nextInt()); - List randomValues = new ArrayList(); + List randomValues = new ArrayList<>(); for (int i = 0; i < POOL_SIZE * 2; i++) { String randomValue = String.valueOf(random.nextInt()); randomValues.add(randomValue); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java index e7a6500cb30..51dc2380e5d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java @@ -135,7 +135,7 @@ public class TestRegionSizeCalculator { */ private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception { Admin mockAdmin = Mockito.mock(Admin.class); - Map regionLoads = new TreeMap(Bytes.BYTES_COMPARATOR); + Map regionLoads = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (RegionLoad regionLoad : regionLoadArray) { regionLoads.put(regionLoad.getName(), regionLoad); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java index ea2bc7abb8b..931830f3dd9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java @@ -126,8 +126,7 @@ public class TestRegionSplitCalculator { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")); SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C")); SimpleRange c = new SimpleRange(Bytes.toBytes("C"), Bytes.toBytes("D")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); sc.add(c); @@ -142,8 +141,7 @@ public class TestRegionSplitCalculator { @Test public void testSplitCalculatorNoEdge() { - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); Multimap regions = sc.calcCoverage(); LOG.info("Empty"); @@ -155,8 +153,7 @@ public class TestRegionSplitCalculator { @Test public void testSplitCalculatorSingleEdge() { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); Multimap regions = sc.calcCoverage(); @@ -169,8 +166,7 @@ public class TestRegionSplitCalculator { @Test public void testSplitCalculatorDegenerateEdge() { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("A")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); Multimap regions = sc.calcCoverage(); @@ -185,8 +181,7 @@ public class TestRegionSplitCalculator { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")); SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C")); SimpleRange c = new SimpleRange(Bytes.toBytes("A"), 
Bytes.toBytes("C")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); sc.add(c); @@ -204,8 +199,7 @@ public class TestRegionSplitCalculator { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")); SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C")); SimpleRange c = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("D")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); sc.add(c); @@ -223,8 +217,7 @@ public class TestRegionSplitCalculator { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")); SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C")); SimpleRange c = new SimpleRange(Bytes.toBytes("E"), Bytes.toBytes("F")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); sc.add(c); @@ -241,8 +234,7 @@ public class TestRegionSplitCalculator { public void testSplitCalculatorOverreach() { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")); SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("D")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); @@ -258,8 +250,7 @@ public class TestRegionSplitCalculator { public void testSplitCalculatorFloor() { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")); SimpleRange b = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); @@ -274,8 +265,7 @@ public class TestRegionSplitCalculator { public void testSplitCalculatorCeil() { SimpleRange a = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")); SimpleRange b = new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes("C")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); @@ -292,8 +282,7 @@ public class TestRegionSplitCalculator { SimpleRange b = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")); LOG.info(a.tiebreaker + " - " + b.tiebreaker); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); sc.add(b); @@ -307,8 +296,7 @@ public class TestRegionSplitCalculator { @Test public void testSplitCalculatorBackwards() { SimpleRange a = new SimpleRange(Bytes.toBytes("C"), Bytes.toBytes("A")); - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(a); Multimap regions = sc.calcCoverage(); @@ -320,8 +308,7 @@ public class TestRegionSplitCalculator { @Test public void testComplex() { - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator sc = new RegionSplitCalculator<>(cmp); sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("Am"))); sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"))); sc.add(new SimpleRange(Bytes.toBytes("Am"), Bytes.toBytes("C"))); @@ -344,8 +331,7 @@ public class TestRegionSplitCalculator { @Test public void testBeginEndMarker() { - RegionSplitCalculator sc = new RegionSplitCalculator( - cmp); + RegionSplitCalculator 
sc = new RegionSplitCalculator<>(cmp); sc.add(new SimpleRange(Bytes.toBytes(""), Bytes.toBytes("A"))); sc.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"))); sc.add(new SimpleRange(Bytes.toBytes("B"), Bytes.toBytes(""))); @@ -364,7 +350,7 @@ public class TestRegionSplitCalculator { SimpleRange ae = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("E")); SimpleRange ac = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")); - Collection bigOverlap = new ArrayList(8); + Collection bigOverlap = new ArrayList<>(8); bigOverlap.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("E"))); bigOverlap.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"))); bigOverlap.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java index c195762ac1f..0c5b9800398 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java @@ -78,7 +78,7 @@ public class TestRegionSplitter { */ @Test public void testCreatePresplitTableHex() throws Exception { - final List expectedBounds = new ArrayList(17); + final List expectedBounds = new ArrayList<>(17); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); expectedBounds.add("10000000".getBytes()); expectedBounds.add("20000000".getBytes()); @@ -108,7 +108,7 @@ public class TestRegionSplitter { */ @Test public void testCreatePresplitTableUniform() throws Exception { - List expectedBounds = new ArrayList(17); + List expectedBounds = new ArrayList<>(17); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); expectedBounds.add(new byte[] { 0x10, 0, 0, 0, 0, 0, 0, 0}); expectedBounds.add(new byte[] { 0x20, 0, 0, 0, 0, 0, 0, 0}); @@ -293,7 +293,7 @@ public class TestRegionSplitter { @Test public void noopRollingSplit() throws Exception { - final List expectedBounds = new ArrayList(1); + final List expectedBounds = new ArrayList<>(1); expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY); rollingSplitAndVerify(TableName.valueOf(TestRegionSplitter.class.getSimpleName()), "UniformSplit", expectedBounds); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java index 839d1cc1039..0efa6da28e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedCopyOnWriteSet.java @@ -34,7 +34,7 @@ public class TestSortedCopyOnWriteSet { @Test public void testSorting() throws Exception { - SortedCopyOnWriteSet set = new SortedCopyOnWriteSet(); + SortedCopyOnWriteSet set = new SortedCopyOnWriteSet<>(); set.add("c"); set.add("d"); set.add("a"); @@ -52,8 +52,7 @@ public class TestSortedCopyOnWriteSet { @Test public void testIteratorIsolation() throws Exception { - SortedCopyOnWriteSet set = new SortedCopyOnWriteSet( - Lists.newArrayList("a", "b", "c", "d", "e")); + SortedCopyOnWriteSet set = new SortedCopyOnWriteSet<>(Lists.newArrayList("a", "b", "c", "d", "e")); // isolation of remove() Iterator iter = set.iterator(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java index 454435b5739..bdae0e513be 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestSortedList.java @@ -45,7 +45,7 @@ public class TestSortedList { @Test public void testSorting() throws Exception { - SortedList list = new SortedList(new StringComparator()); + SortedList list = new SortedList<>(new StringComparator()); list.add("c"); list.add("d"); list.add("a"); @@ -72,8 +72,7 @@ public class TestSortedList { @Test public void testReadOnlyIterators() throws Exception { - SortedList list = new SortedList( - Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); + SortedList list = new SortedList<>(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); Iterator i = list.iterator(); i.next(); @@ -108,8 +107,7 @@ public class TestSortedList { @Test public void testIteratorIsolation() throws Exception { - SortedList list = new SortedList( - Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); + SortedList list = new SortedList<>(Lists.newArrayList("a", "b", "c", "d", "e"), new StringComparator()); // isolation of remove() Iterator iter = list.iterator(); @@ -161,8 +159,7 @@ public class TestSortedList { @Test public void testRandomAccessIsolation() throws Exception { - SortedList list = new SortedList( - Lists.newArrayList("a", "b", "c"), new StringComparator()); + SortedList list = new SortedList<>(Lists.newArrayList("a", "b", "c"), new StringComparator()); List innerList = list.get(); assertEquals("a", innerList.get(0)); assertEquals("b", innerList.get(1)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 3701094fd65..a7c4ad140ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -228,7 +228,7 @@ public class OfflineMetaRebuildTestCore { Scan s = new Scan(); Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); ResultScanner scanner = meta.getScanner(s); - List dels = new ArrayList(); + List dels = new ArrayList<>(); for (Result r : scanner) { HRegionInfo info = MetaTableAccessor.getHRegionInfo(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java index 2d32b5e6530..e2ea838bc74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java @@ -108,7 +108,7 @@ public class IOTestProvider implements WALProvider { @Override public List getWALs() throws IOException { - List wals = new ArrayList(1); + List wals = new ArrayList<>(1); wals.add(log); return wals; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java index 8523e6923ad..73725bb4e3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java @@ -178,7 +178,7 @@ public class TestBoundedRegionGroupingStrategy { FSUtils.setRootDir(CONF, TEST_UTIL.getDataTestDirOnTestFS()); wals = new 
WALFactory(CONF, null, "setMembershipDedups"); - final Set seen = new HashSet(temp * 4); + final Set seen = new HashSet<>(temp * 4); final Random random = new Random(); int count = 0; // we know that this should see one of the wals more than once diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java index d82c3b668a6..f752735683f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java @@ -193,12 +193,12 @@ public class TestFSHLogProvider { final HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(currentTest.getMethodName() + "2")) .addFamily(new HColumnDescriptor("row")); - NavigableMap scopes1 = new TreeMap( + NavigableMap scopes1 = new TreeMap<>( Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes1.put(fam, 0); } - NavigableMap scopes2 = new TreeMap( + NavigableMap scopes2 = new TreeMap<>( Bytes.BYTES_COMPARATOR); for(byte[] fam : htd2.getFamiliesKeys()) { scopes2.put(fam, 0); @@ -275,12 +275,12 @@ public class TestFSHLogProvider { new HTableDescriptor(TableName.valueOf(currentTest.getMethodName() + "1")).addFamily(new HColumnDescriptor("row")); HTableDescriptor table2 = new HTableDescriptor(TableName.valueOf(currentTest.getMethodName() + "2")).addFamily(new HColumnDescriptor("row")); - NavigableMap scopes1 = new TreeMap( + NavigableMap scopes1 = new TreeMap<>( Bytes.BYTES_COMPARATOR); for(byte[] fam : table1.getFamiliesKeys()) { scopes1.put(fam, 0); } - NavigableMap scopes2 = new TreeMap( + NavigableMap scopes2 = new TreeMap<>( Bytes.BYTES_COMPARATOR); for(byte[] fam : table2.getFamiliesKeys()) { scopes2.put(fam, 0); @@ -370,7 +370,7 @@ public class TestFSHLogProvider { localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); final WALFactory wals = new WALFactory(localConf, null, currentTest.getMethodName()); try { - final Set seen = new HashSet(1); + final Set seen = new HashSet<>(1); final Random random = new Random(); assertTrue("first attempt to add WAL from default provider should work.", seen.add(wals.getWAL(Bytes.toBytes(random.nextInt()), null))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java index 913ea48919d..7497d679ee8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java @@ -116,8 +116,7 @@ public class TestSecureWAL { TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^a-zA-Z0-9]", "_")); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(tableName.getName())); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index 3318f61b087..f02e244b3fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -180,8 +180,7 @@ public class TestWALFactory { } HTableDescriptor htd = new 
HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("column")); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -259,8 +258,7 @@ public class TestWALFactory { null,null, false); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(tableName.getName())); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -385,8 +383,7 @@ public class TestWALFactory { HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(tableName.getName())); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -637,8 +634,7 @@ public class TestWALFactory { long timestamp = System.currentTimeMillis(); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("column")); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java index c69150f826c..65401dea3b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java @@ -107,8 +107,7 @@ public class TestWALFiltering { public void testFlushedSequenceIdsSentToHMaster() throws IOException, InterruptedException, org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException, ServiceException { - SortedMap allFlushedSequenceIds = - new TreeMap(Bytes.BYTES_COMPARATOR); + SortedMap allFlushedSequenceIds = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < NUM_RS; ++i) { flushAllRegions(i); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java index 6f4a797e6cc..ecde00d2f13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java @@ -104,8 +104,7 @@ public class TestWALReaderOnSecureWAL { TableName tableName = TableName.valueOf(tblName); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(tableName.getName())); - NavigableMap scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index 3b15cefd01c..611f8c31455 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -135,7 +135,7 @@ public class TestWALSplit { private static final byte[] QUALIFIER = "q1".getBytes(); private static final byte[] VALUE = "v1".getBytes(); private static 
final String WAL_FILE_PREFIX = "wal.dat."; - private static List REGIONS = new ArrayList(); + private static List REGIONS = new ArrayList<>(); private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors"; private static String ROBBER; private static String ZOMBIE; @@ -158,7 +158,7 @@ public class TestWALSplit { // This is how you turn off shortcircuit read currently. TODO: Fix. Should read config. System.setProperty("hbase.tests.use.shortcircuit.reads", "false"); // Create fake maping user to group and set it to the conf. - Map u2g_map = new HashMap(2); + Map u2g_map = new HashMap<>(2); ROBBER = User.getCurrent().getName() + "-robber"; ZOMBIE = User.getCurrent().getName() + "-zombie"; u2g_map.put(ROBBER, GROUP); @@ -585,7 +585,7 @@ public class TestWALSplit { .filter(x -> x != FaultyProtobufLogReader.FailureType.NONE).collect(Collectors.toList()); for (FaultyProtobufLogReader.FailureType failureType : failureTypes) { final Set walDirContents = splitCorruptWALs(failureType); - final Set archivedLogs = new HashSet(); + final Set archivedLogs = new HashSet<>(); final StringBuilder archived = new StringBuilder("Archived logs in CORRUPTDIR:"); for (FileStatus log : fs.listStatus(CORRUPTDIR)) { archived.append("\n\t").append(log.toString()); @@ -630,7 +630,7 @@ public class TestWALSplit { wals = new WALFactory(conf, null, name.getMethodName()); generateWALs(-1); // Our reader will render all of these files corrupt. - final Set walDirContents = new HashSet(); + final Set walDirContents = new HashSet<>(); for (FileStatus status : fs.listStatus(WALDIR)) { walDirContents.add(status.getPath().getName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java index 9bb3d7d8dda..53cc49b84c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java @@ -141,8 +141,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { this.numFamilies = htd.getColumnFamilyCount(); this.region = region; this.htd = htd; - scopes = new TreeMap( - Bytes.BYTES_COMPARATOR); + scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); for(byte[] fam : htd.getFamiliesKeys()) { scopes.put(fam, 0); } @@ -420,7 +419,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { throws IOException { WAL.Reader reader = wals.createReader(wal.getFileSystem(getConf()), wal); long count = 0; - Map sequenceIds = new HashMap(); + Map sequenceIds = new HashMap<>(); try { while (true) { WAL.Entry e = reader.next(); @@ -490,7 +489,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { System.exit(1); } - private final Set walsListenedTo = new HashSet(); + private final Set walsListenedTo = new HashSet<>(); private HRegion openRegion(final FileSystem fs, final Path dir, final HTableDescriptor htd, final WALFactory wals, final long whenToRoll, final LogRoller roller) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java index 6350af8b5c4..b4ac59c7465 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java @@ -85,24 +85,24 @@ public class 
TestZKMulti { ZKUtil.multiOrSequential(zkw, null, false); // empty multi - ZKUtil.multiOrSequential(zkw, new LinkedList(), false); + ZKUtil.multiOrSequential(zkw, new LinkedList<>(), false); // single create String path = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSimpleMulti"); - LinkedList singleCreate = new LinkedList(); + LinkedList singleCreate = new LinkedList<>(); singleCreate.add(ZKUtilOp.createAndFailSilent(path, new byte[0])); ZKUtil.multiOrSequential(zkw, singleCreate, false); assertTrue(ZKUtil.checkExists(zkw, path) != -1); // single setdata - LinkedList singleSetData = new LinkedList(); + LinkedList singleSetData = new LinkedList<>(); byte [] data = Bytes.toBytes("foobar"); singleSetData.add(ZKUtilOp.setData(path, data)); ZKUtil.multiOrSequential(zkw, singleSetData, false); assertTrue(Bytes.equals(ZKUtil.getData(zkw, path), data)); // single delete - LinkedList singleDelete = new LinkedList(); + LinkedList singleDelete = new LinkedList<>(); singleDelete.add(ZKUtilOp.deleteNodeFailSilent(path)); ZKUtil.multiOrSequential(zkw, singleDelete, false); assertTrue(ZKUtil.checkExists(zkw, path) == -1); @@ -117,7 +117,7 @@ public class TestZKMulti { String path5 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti5"); String path6 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti6"); // create 4 nodes that we'll setData on or delete later - LinkedList create4Nodes = new LinkedList(); + LinkedList create4Nodes = new LinkedList<>(); create4Nodes.add(ZKUtilOp.createAndFailSilent(path1, Bytes.toBytes(path1))); create4Nodes.add(ZKUtilOp.createAndFailSilent(path2, Bytes.toBytes(path2))); create4Nodes.add(ZKUtilOp.createAndFailSilent(path3, Bytes.toBytes(path3))); @@ -129,7 +129,7 @@ public class TestZKMulti { assertTrue(Bytes.equals(ZKUtil.getData(zkw, path4), Bytes.toBytes(path4))); // do multiple of each operation (setData, delete, create) - LinkedList ops = new LinkedList(); + LinkedList ops = new LinkedList<>(); // setData ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); ops.add(ZKUtilOp.setData(path2, Bytes.add(Bytes.toBytes(path2), Bytes.toBytes(path2)))); @@ -155,7 +155,7 @@ public class TestZKMulti { // try to delete a node that doesn't exist boolean caughtNoNode = false; String path = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureZ"); - LinkedList ops = new LinkedList(); + LinkedList ops = new LinkedList<>(); ops.add(ZKUtilOp.deleteNodeFailSilent(path)); try { ZKUtil.multiOrSequential(zkw, ops, false); @@ -166,7 +166,7 @@ public class TestZKMulti { // try to setData on a node that doesn't exist caughtNoNode = false; - ops = new LinkedList(); + ops = new LinkedList<>(); ops.add(ZKUtilOp.setData(path, Bytes.toBytes(path))); try { ZKUtil.multiOrSequential(zkw, ops, false); @@ -177,7 +177,7 @@ public class TestZKMulti { // try to create on a node that already exists boolean caughtNodeExists = false; - ops = new LinkedList(); + ops = new LinkedList<>(); ops.add(ZKUtilOp.createAndFailSilent(path, Bytes.toBytes(path))); ZKUtil.multiOrSequential(zkw, ops, false); try { @@ -194,7 +194,7 @@ public class TestZKMulti { String pathA = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiA"); String pathB = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiB"); String pathC = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiC"); - LinkedList ops = new LinkedList(); + LinkedList ops = new LinkedList<>(); ops.add(ZKUtilOp.createAndFailSilent(pathA, 
Bytes.toBytes(pathA))); ops.add(ZKUtilOp.createAndFailSilent(pathB, Bytes.toBytes(pathB))); ops.add(ZKUtilOp.deleteNodeFailSilent(pathC)); @@ -217,14 +217,14 @@ public class TestZKMulti { String pathY = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureY"); String pathZ = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureZ"); // create X that we will use to fail create later - LinkedList ops = new LinkedList(); + LinkedList ops = new LinkedList<>(); ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); ZKUtil.multiOrSequential(zkw, ops, false); // fail one of each create ,setData, delete String pathV = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureV"); String pathW = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureW"); - ops = new LinkedList(); + ops = new LinkedList<>(); ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- already exists ops.add(ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist ops.add(ZKUtilOp.deleteNodeFailSilent(pathZ)); // fail -- doesn't exist @@ -246,7 +246,7 @@ public class TestZKMulti { assertTrue(ZKUtil.checkExists(zkw, pathV) == -1); // test that with multiple failures, throws an exception corresponding to first failure in list - ops = new LinkedList(); + ops = new LinkedList<>(); ops.add(ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- exists boolean caughtNoNode = false; @@ -273,14 +273,14 @@ public class TestZKMulti { String path4 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential4"); // create some nodes that we will use later - LinkedList ops = new LinkedList(); + LinkedList ops = new LinkedList<>(); ops.add(ZKUtilOp.createAndFailSilent(path1, Bytes.toBytes(path1))); ops.add(ZKUtilOp.createAndFailSilent(path2, Bytes.toBytes(path2))); ZKUtil.multiOrSequential(zkw, ops, false); // test that, even with operations that fail, the ones that would pass will pass // with runSequentialOnMultiFailure - ops = new LinkedList(); + ops = new LinkedList<>(); ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); // pass ops.add(ZKUtilOp.deleteNodeFailSilent(path2)); // pass ops.add(ZKUtilOp.deleteNodeFailSilent(path3)); // fail -- node doesn't exist @@ -368,7 +368,7 @@ public class TestZKMulti { private void createZNodeTree(String rootZNode) throws KeeperException, InterruptedException { - List opList = new ArrayList(); + List opList = new ArrayList<>(); opList.add(Op.create(rootZNode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)); int level = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java index 26329f6cda4..89164f45f04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java @@ -325,7 +325,7 @@ public class TestZooKeeperACL { if (!secureZKAvailable) { return; } - List drainingServers = new ArrayList(1); + List drainingServers = new ArrayList<>(1); drainingServers.add(ServerName.parseServerName("ZZZ,123,123")); // If unable to connect to secure ZK cluster then this operation would fail. 
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java index 59e5856bd7c..315d6b02839 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/CallQueue.java @@ -133,7 +133,7 @@ public class CallQueue implements BlockingQueue { throw new IllegalArgumentException( "A BlockingQueue cannot drain to itself."); } - List drained = new ArrayList(); + List drained = new ArrayList<>(); underlyingQueue.drainTo(drained, maxElements); for (Call r : drained) { updateMetrics(r); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java index 2f4336b81f8..221786af392 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java @@ -154,7 +154,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { private final AtomicLong successfulCoalescings = new AtomicLong(); private final AtomicLong totalIncrements = new AtomicLong(); private final ConcurrentMap countersMap = - new ConcurrentHashMap(100000, 0.75f, 1500); + new ConcurrentHashMap<>(100000, 0.75f, 1500); private final ThreadPoolExecutor pool; private final HBaseHandler handler; @@ -166,7 +166,7 @@ public class IncrementCoalescer implements IncrementCoalescerMBean { @SuppressWarnings("deprecation") public IncrementCoalescer(HBaseHandler hand) { this.handler = hand; - LinkedBlockingQueue queue = new LinkedBlockingQueue(); + LinkedBlockingQueue queue = new LinkedBlockingQueue<>(); pool = new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue, Threads.newDaemonThreadFactory("IncrementCoalescer")); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java index 2a1a3981964..b01bacfc2ff 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java @@ -146,10 +146,10 @@ public class TBoundedThreadPoolServer extends TServer { int maxWorkerThreads = options.maxWorkerThreads; if (options.maxQueuedRequests > 0) { this.callQueue = new CallQueue( - new LinkedBlockingQueue(options.maxQueuedRequests), metrics); + new LinkedBlockingQueue<>(options.maxQueuedRequests), metrics); minWorkerThreads = maxWorkerThreads; } else { - this.callQueue = new CallQueue(new SynchronousQueue(), metrics); + this.callQueue = new CallQueue(new SynchronousQueue<>(), metrics); } ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 3eacfb950cc..0829188e99c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -299,7 +299,7 @@ public class ThriftServerRunner implements Runnable { } public static List serversThatCannotSpecifyBindIP() { - List l = new ArrayList(); + List l = new ArrayList<>(); for (ImplType t : values()) { if 
(!t.canSpecifyBindIP) { l.add(t.simpleClassName()); @@ -396,7 +396,7 @@ public class ThriftServerRunner implements Runnable { private void setupHTTPServer() throws IOException { TProtocolFactory protocolFactory = new TBinaryProtocol.Factory(); - TProcessor processor = new Hbase.Processor(handler); + TProcessor processor = new Hbase.Processor<>(handler); TServlet thriftHttpServlet = new ThriftHttpServlet(processor, protocolFactory, realUser, conf, hbaseHandler, securityEnabled, doAsEnabled); @@ -496,7 +496,7 @@ public class ThriftServerRunner implements Runnable { protocolFactory = new TBinaryProtocol.Factory(); } - final TProcessor p = new Hbase.Processor(handler); + final TProcessor p = new Hbase.Processor<>(handler); ImplType implType = ImplType.getServerImpl(conf); TProcessor processor = p; @@ -516,7 +516,7 @@ public class ThriftServerRunner implements Runnable { // Extract the name from the principal String name = SecurityUtil.getUserFromPrincipal( conf.get("hbase.thrift.kerberos.principal")); - Map saslProperties = new HashMap(); + Map saslProperties = new HashMap<>(); saslProperties.put(Sasl.QOP, qop); TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory(); saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, @@ -591,8 +591,7 @@ public class ThriftServerRunner implements Runnable { tserver = new TNonblockingServer(serverArgs); } else if (implType == ImplType.HS_HA) { THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport); - CallQueue callQueue = - new CallQueue(new LinkedBlockingQueue(), metrics); + CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics); ExecutorService executorService = createExecutor( callQueue, serverArgs.getMaxWorkerThreads(), serverArgs.getMaxWorkerThreads()); serverArgs.executorService(executorService) @@ -603,8 +602,7 @@ public class ThriftServerRunner implements Runnable { } else { // THREADED_SELECTOR TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf); - CallQueue callQueue = - new CallQueue(new LinkedBlockingQueue(), metrics); + CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics); ExecutorService executorService = createExecutor( callQueue, serverArgs.getWorkerThreads(), serverArgs.getWorkerThreads()); serverArgs.executorService(executorService) @@ -781,7 +779,7 @@ public class ThriftServerRunner implements Runnable { protected HBaseHandler(final Configuration c, final UserProvider userProvider) throws IOException { this.conf = c; - scannerMap = new HashMap(); + scannerMap = new HashMap<>(); this.coalescer = new IncrementCoalescer(this); int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000); @@ -869,7 +867,7 @@ public class ThriftServerRunner implements Runnable { public List getTableNames() throws IOError { try { TableName[] tableNames = this.getAdmin().listTableNames(); - ArrayList list = new ArrayList(tableNames.length); + ArrayList list = new ArrayList<>(tableNames.length); for (int i = 0; i < tableNames.length; i++) { list.add(ByteBuffer.wrap(tableNames[i].getName())); } @@ -888,7 +886,7 @@ public class ThriftServerRunner implements Runnable { throws IOError { try (RegionLocator locator = connectionCache.getRegionLocator(getBytes(tableName))) { List regionLocations = locator.getAllRegionLocations(); - List results = new ArrayList(regionLocations.size()); + List results = new ArrayList<>(regionLocations.size()); for (HRegionLocation regionLocation : regionLocations) { HRegionInfo info = 
regionLocation.getRegionInfo(); ServerName serverName = regionLocation.getServerName(); @@ -1151,7 +1149,7 @@ public class ThriftServerRunner implements Runnable { Table table= null; try { - List gets = new ArrayList(rows.size()); + List gets = new ArrayList<>(rows.size()); table = getTable(tableName); if (metrics != null) { metrics.incNumRowKeysInBatchGet(rows.size()); @@ -1363,8 +1361,8 @@ public class ThriftServerRunner implements Runnable { ByteBuffer tableName, List rowBatches, long timestamp, Map attributes) throws IOError, IllegalArgument, TException { - List puts = new ArrayList(); - List deletes = new ArrayList(); + List puts = new ArrayList<>(); + List deletes = new ArrayList<>(); for (BatchMutation batch : rowBatches) { byte[] row = getBytes(batch.row); @@ -1479,7 +1477,7 @@ public class ThriftServerRunner implements Runnable { try { results = resultScannerWrapper.getScanner().next(nbRows); if (null == results) { - return new ArrayList(); + return new ArrayList<>(); } } catch (IOException e) { LOG.warn(e.getMessage(), e); @@ -1709,8 +1707,7 @@ public class ThriftServerRunner implements Runnable { Table table = null; try { - TreeMap columns = - new TreeMap(); + TreeMap columns = new TreeMap<>(); table = getTable(tableName); HTableDescriptor desc = table.getTableDescriptor(); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java index d2a95ce5a64..7ec49fbc69e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java @@ -107,7 +107,7 @@ public class ThriftUtilities { * @return Thrift TCell array */ static public List cellFromHBase(Cell in) { - List list = new ArrayList(1); + List list = new ArrayList<>(1); if (in != null) { list.add(new TCell(ByteBuffer.wrap(CellUtil.cloneValue(in)), in.getTimestamp())); } @@ -123,12 +123,12 @@ public class ThriftUtilities { static public List cellFromHBase(Cell[] in) { List list = null; if (in != null) { - list = new ArrayList(in.length); + list = new ArrayList<>(in.length); for (int i = 0; i < in.length; i++) { list.add(new TCell(ByteBuffer.wrap(CellUtil.cloneValue(in[i])), in[i].getTimestamp())); } } else { - list = new ArrayList(0); + list = new ArrayList<>(0); } return list; } @@ -149,7 +149,7 @@ public class ThriftUtilities { * @return Thrift TRowResult array */ static public List rowResultFromHBase(Result[] in, boolean sortColumns) { - List results = new ArrayList(in.length); + List results = new ArrayList<>(in.length); for ( Result result_ : in) { if(result_ == null || result_.isEmpty()) { continue; @@ -157,7 +157,7 @@ public class ThriftUtilities { TRowResult result = new TRowResult(); result.row = ByteBuffer.wrap(result_.getRow()); if (sortColumns) { - result.sortedColumns = new ArrayList(); + result.sortedColumns = new ArrayList<>(); for (Cell kv : result_.rawCells()) { result.sortedColumns.add(new TColumn( ByteBuffer.wrap(KeyValue.makeColumn(CellUtil.cloneFamily(kv), @@ -165,7 +165,7 @@ public class ThriftUtilities { new TCell(ByteBuffer.wrap(CellUtil.cloneValue(kv)), kv.getTimestamp()))); } } else { - result.columns = new TreeMap(); + result.columns = new TreeMap<>(); for (Cell kv : result_.rawCells()) { result.columns.put( ByteBuffer.wrap(KeyValue.makeColumn(CellUtil.cloneFamily(kv), diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index 5a68147b848..acad62c9a9b 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -87,8 +87,7 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { // nextScannerId and scannerMap are used to manage scanner state // TODO: Cleanup thread for Scanners, Scanner id wrap private final AtomicInteger nextScannerId = new AtomicInteger(0); - private final Map scannerMap = - new ConcurrentHashMap(); + private final Map scannerMap = new ConcurrentHashMap<>(); private final ConnectionCache connectionCache; diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java index d027c77995d..560ae649188 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java @@ -195,7 +195,7 @@ public class ThriftServer extends Configured implements Tool { } else if (qop == null) { return new TTransportFactory(); } else { - Map saslProperties = new HashMap(); + Map saslProperties = new HashMap<>(); saslProperties.put(Sasl.QOP, qop.getSaslQop()); TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory(); saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, @@ -306,9 +306,9 @@ public class ThriftServer extends Configured implements Tool { int workerThreads, int maxCallQueueSize, ThriftMetrics metrics) { CallQueue callQueue; if (maxCallQueueSize > 0) { - callQueue = new CallQueue(new LinkedBlockingQueue(maxCallQueueSize), metrics); + callQueue = new CallQueue(new LinkedBlockingQueue<>(maxCallQueueSize), metrics); } else { - callQueue = new CallQueue(new LinkedBlockingQueue(), metrics); + callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics); } ThreadFactoryBuilder tfb = new ThreadFactoryBuilder(); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index 0001b3fba57..7b4a82bea1f 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -139,7 +139,7 @@ public class ThriftUtilities { * @see #getFromThrift(TGet) */ public static List getsFromThrift(List in) throws IOException { - List out = new ArrayList(in.size()); + List out = new ArrayList<>(in.size()); for (TGet get : in) { out.add(getFromThrift(get)); } @@ -160,7 +160,7 @@ public class ThriftUtilities { if (row != null) { out.setRow(in.getRow()); } - List columnValues = new ArrayList(raw.length); + List columnValues = new ArrayList<>(raw.length); for (Cell kv : raw) { TColumnValue col = new TColumnValue(); col.setFamily(CellUtil.cloneFamily(kv)); @@ -186,7 +186,7 @@ public class ThriftUtilities { * @see #resultFromHBase(Result) */ public static List resultsFromHBase(Result[] in) { - List out = new ArrayList(in.length); + List out = new ArrayList<>(in.length); for (Result result : in) { out.add(resultFromHBase(result)); } @@ -245,7 +245,7 @@ public class ThriftUtilities { * @see #putFromThrift(TPut) */ public static List putsFromThrift(List in) { - List out = new ArrayList(in.size()); + List out = new 
ArrayList<>(in.size()); for (TPut put : in) { out.add(putFromThrift(put)); } @@ -318,7 +318,7 @@ public class ThriftUtilities { */ public static List deletesFromThrift(List in) { - List out = new ArrayList(in.size()); + List out = new ArrayList<>(in.size()); for (TDelete delete : in) { out.add(deleteFromThrift(delete)); } @@ -328,7 +328,7 @@ public class ThriftUtilities { public static TDelete deleteFromHBase(Delete in) { TDelete out = new TDelete(ByteBuffer.wrap(in.getRow())); - List columns = new ArrayList(in.getFamilyCellMap().entrySet().size()); + List columns = new ArrayList<>(in.getFamilyCellMap().entrySet().size()); long rowTimestamp = in.getTimeStamp(); if (rowTimestamp != HConstants.LATEST_TIMESTAMP) { out.setTimestamp(rowTimestamp); @@ -505,7 +505,7 @@ public class ThriftUtilities { } public static List regionLocationsFromHBase(List locations) { - List tlocations = new ArrayList(locations.size()); + List tlocations = new ArrayList<>(locations.size()); for (HRegionLocation hrl:locations) { tlocations.add(regionLocationFromHBase(hrl)); } diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java index b6460099188..e59584766e8 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java @@ -56,7 +56,7 @@ public class TestCallQueue { @Parameters public static Collection getParameters() { - Collection parameters = new ArrayList(); + Collection parameters = new ArrayList<>(); for (int elementsAdded : new int[] {100, 200, 300}) { for (int elementsRemoved : new int[] {0, 20, 100}) { parameters.add(new Object[]{new Integer(elementsAdded), @@ -77,8 +77,7 @@ public class TestCallQueue { @Test(timeout = 60000) public void testPutTake() throws Exception { ThriftMetrics metrics = createMetrics(); - CallQueue callQueue = new CallQueue( - new LinkedBlockingQueue(), metrics); + CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics); for (int i = 0; i < elementsAdded; ++i) { callQueue.put(createDummyRunnable()); } @@ -91,8 +90,7 @@ public class TestCallQueue { @Test(timeout = 60000) public void testOfferPoll() throws Exception { ThriftMetrics metrics = createMetrics(); - CallQueue callQueue = new CallQueue( - new LinkedBlockingQueue(), metrics); + CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics); for (int i = 0; i < elementsAdded; ++i) { callQueue.offer(createDummyRunnable()); } diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java index 26019beb321..c04b36f0b53 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java @@ -127,7 +127,7 @@ public class TestThriftHttpServer { } private void runThriftServer(int customHeaderSize) throws Exception { - List args = new ArrayList(3); + List args = new ArrayList<>(3); port = HBaseTestingUtility.randomFreePort(); args.add("-" + ThriftServer.PORT_OPTION); args.add(String.valueOf(port)); diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java index ff4bc6a47ab..d0052e50be5 100644 --- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java @@ -280,13 +280,13 @@ public class TestThriftServer { } public static void doTestIncrements(HBaseHandler handler) throws Exception { - List mutations = new ArrayList(1); + List mutations = new ArrayList<>(1); mutations.add(new Mutation(false, columnAAname, valueEname, true)); mutations.add(new Mutation(false, columnAname, valueEname, true)); handler.mutateRow(tableAname, rowAname, mutations, null); handler.mutateRow(tableAname, rowBname, mutations, null); - List increments = new ArrayList(3); + List increments = new ArrayList<>(3); increments.add(new TIncrement(tableAname, rowBname, columnAAname, 7)); increments.add(new TIncrement(tableAname, rowBname, columnAAname, 7)); increments.add(new TIncrement(tableAname, rowBname, columnAAname, 7)); @@ -377,7 +377,7 @@ public class TestThriftServer { assertEquals(0, size); // Try null mutation - List mutations = new ArrayList(1); + List mutations = new ArrayList<>(1); mutations.add(new Mutation(false, columnAname, null, true)); handler.mutateRow(tableAname, rowAname, mutations, null); TRowResult rowResult3 = handler.getRow(tableAname, rowAname, null).get(0); @@ -436,7 +436,7 @@ public class TestThriftServer { // ColumnAname has been deleted, and will never be visible even with a getRowTs() assertFalse(rowResult2.columns.containsKey(columnAname)); - List columns = new ArrayList(1); + List columns = new ArrayList<>(1); columns.add(columnBname); rowResult1 = handler.getRowWithColumns(tableAname, rowAname, columns, null).get(0); @@ -555,7 +555,7 @@ public class TestThriftServer { assertEquals(rowResult6.sortedColumns.size(), 1); assertEquals(rowResult6.sortedColumns.get(0).getCell().value, valueCname); - List rowBmutations = new ArrayList(20); + List rowBmutations = new ArrayList<>(20); for (int i = 0; i < 20; i++) { rowBmutations.add(new Mutation(false, asByteBuffer("columnA:" + i), valueCname, true)); } @@ -668,13 +668,13 @@ public class TestThriftServer { UserProvider.instantiate(UTIL.getConfiguration())); handler.createTable(tableAname, getColumnDescriptors()); try { - List mutations = new ArrayList(1); + List mutations = new ArrayList<>(1); mutations.add(new Mutation(false, columnAname, valueAname, true)); handler.mutateRow(tableAname, rowAname, mutations, null); - List columnList = new ArrayList(1); + List columnList = new ArrayList<>(1); columnList.add(columnAname); - List valueList = new ArrayList(1); + List valueList = new ArrayList<>(1); valueList.add(valueBname); TAppend append = new TAppend(tableAname, rowAname, columnList, valueList); @@ -702,7 +702,7 @@ public class TestThriftServer { UserProvider.instantiate(UTIL.getConfiguration())); handler.createTable(tableAname, getColumnDescriptors()); try { - List mutations = new ArrayList(1); + List mutations = new ArrayList<>(1); mutations.add(new Mutation(false, columnAname, valueAname, true)); Mutation putB = (new Mutation(false, columnBname, valueBname, true)); @@ -796,7 +796,7 @@ public class TestThriftServer { * default ColumnDescriptor and one ColumnDescriptor with fewer versions */ private static List getColumnDescriptors() { - ArrayList cDescriptors = new ArrayList(2); + ArrayList cDescriptors = new ArrayList<>(2); // A default ColumnDescriptor ColumnDescriptor cDescA = new ColumnDescriptor(); @@ -818,7 +818,7 @@ public class TestThriftServer { * @return a List of column names for use in retrieving a scanner */ private List 
getColumnList(boolean includeA, boolean includeB) {
-    List<ByteBuffer> columnList = new ArrayList<ByteBuffer>();
+    List<ByteBuffer> columnList = new ArrayList<>();
     if (includeA) columnList.add(columnAname);
     if (includeB) columnList.add(columnBname);
     return columnList;
@@ -830,7 +830,7 @@ public class TestThriftServer {
    * and columnB having valueB
    */
   private static List<Mutation> getMutations() {
-    List<Mutation> mutations = new ArrayList<Mutation>(2);
+    List<Mutation> mutations = new ArrayList<>(2);
     mutations.add(new Mutation(false, columnAname, valueAname, true));
     mutations.add(new Mutation(false, columnBname, valueBname, true));
     return mutations;
@@ -845,19 +845,19 @@ public class TestThriftServer {
    * (rowB, columnB): place valueD
    */
   private static List<BatchMutation> getBatchMutations() {
-    List<BatchMutation> batchMutations = new ArrayList<BatchMutation>(3);
+    List<BatchMutation> batchMutations = new ArrayList<>(3);
     // Mutations to rowA. You can't mix delete and put anymore.
-    List<Mutation> rowAmutations = new ArrayList<Mutation>(1);
+    List<Mutation> rowAmutations = new ArrayList<>(1);
     rowAmutations.add(new Mutation(true, columnAname, null, true));
     batchMutations.add(new BatchMutation(rowAname, rowAmutations));
-    rowAmutations = new ArrayList<Mutation>(1);
+    rowAmutations = new ArrayList<>(1);
     rowAmutations.add(new Mutation(false, columnBname, valueCname, true));
     batchMutations.add(new BatchMutation(rowAname, rowAmutations));
     // Mutations to rowB
-    List<Mutation> rowBmutations = new ArrayList<Mutation>(2);
+    List<Mutation> rowBmutations = new ArrayList<>(2);
     rowBmutations.add(new Mutation(false, columnAname, valueCname, true));
     rowBmutations.add(new Mutation(false, columnBname, valueDname, true));
     batchMutations.add(new BatchMutation(rowBname, rowBmutations));
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java
index 457273ec55b..87998dacc68 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.java
@@ -82,7 +82,7 @@ public class TestThriftServerCmdLine {
   @Parameters
   public static Collection<Object[]> getParameters() {
-    Collection<Object[]> parameters = new ArrayList<Object[]>();
+    Collection<Object[]> parameters = new ArrayList<>();
     for (ImplType implType : ImplType.values()) {
       for (boolean specifyFramed : new boolean[] {false, true}) {
         for (boolean specifyBindIP : new boolean[] {false, true}) {
@@ -151,7 +151,7 @@ public class TestThriftServerCmdLine {
   @Test(timeout=600000)
   public void testRunThriftServer() throws Exception {
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<>();
     if (implType != null) {
       String serverTypeOption = implType.toString();
       assertTrue(serverTypeOption.startsWith("-"));
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
index 3fe8537e375..db5bdf2efda 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
@@ -195,7 +195,7 @@ public class TestThriftHBaseServiceHandler {
     TGet get = new TGet(wrap(rowName));
     assertFalse(handler.exists(table, get));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -240,7 +240,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testPutGet".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -264,16 +264,16 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName1 = "testPutGetMultiple1".getBytes();
     byte[] rowName2 = "testPutGetMultiple2".getBytes();
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
-    List<TPut> puts = new ArrayList<TPut>(2);
+    List<TPut> puts = new ArrayList<>(2);
     puts.add(new TPut(wrap(rowName1), columnValues));
     puts.add(new TPut(wrap(rowName2), columnValues));
     handler.putMultiple(table, puts);
-    List<TGet> gets = new ArrayList<TGet>(2);
+    List<TGet> gets = new ArrayList<>(2);
     gets.add(new TGet(wrap(rowName1)));
     gets.add(new TGet(wrap(rowName2)));
@@ -294,16 +294,16 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName1 = "testDeleteMultiple1".getBytes();
     byte[] rowName2 = "testDeleteMultiple2".getBytes();
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
-    List<TPut> puts = new ArrayList<TPut>(2);
+    List<TPut> puts = new ArrayList<>(2);
     puts.add(new TPut(wrap(rowName1), columnValues));
     puts.add(new TPut(wrap(rowName2), columnValues));
     handler.putMultiple(table, puts);
-    List<TDelete> deletes = new ArrayList<TDelete>(2);
+    List<TDelete> deletes = new ArrayList<>(2);
     deletes.add(new TDelete(wrap(rowName1)));
     deletes.add(new TDelete(wrap(rowName2)));
@@ -321,7 +321,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testDelete".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
@@ -335,7 +335,7 @@ public class TestThriftHBaseServiceHandler {
     handler.put(table, put);
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
+    List<TColumn> deleteColumns = new ArrayList<>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
@@ -347,7 +347,7 @@ public class TestThriftHBaseServiceHandler {
     TResult result = handler.get(table, get);
     assertArrayEquals(rowName, result.getRow());
     List<TColumnValue> returnedColumnValues = result.getColumnValues();
-    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> expectedColumnValues = new ArrayList<>(1);
     expectedColumnValues.add(columnValueB);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
   }
@@ -358,7 +358,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testDeleteAllTimestamps".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     columnValueA.setTimestamp(System.currentTimeMillis() - 10);
@@ -377,7 +377,7 @@ public class TestThriftHBaseServiceHandler {
     assertEquals(2, result.getColumnValuesSize());
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
+    List<TColumn> deleteColumns = new ArrayList<>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
@@ -401,7 +401,7 @@ public class TestThriftHBaseServiceHandler {
     long timestamp1 = System.currentTimeMillis() - 10;
     long timestamp2 = System.currentTimeMillis();
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     columnValueA.setTimestamp(timestamp1);
@@ -420,7 +420,7 @@ public class TestThriftHBaseServiceHandler {
     assertEquals(2, result.getColumnValuesSize());
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
+    List<TColumn> deleteColumns = new ArrayList<>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
@@ -443,14 +443,14 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testIncrement".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
     TPut put = new TPut(wrap(rowName), columnValues);
     put.setColumnValues(columnValues);
     handler.put(table, put);
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
+    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
     handler.increment(table, increment);
@@ -471,13 +471,13 @@ public class TestThriftHBaseServiceHandler {
     ByteBuffer table = wrap(tableAname);
     byte[] v1 = Bytes.toBytes("42");
     byte[] v2 = Bytes.toBytes("23");
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1)));
     TPut put = new TPut(wrap(rowName), columnValues);
     put.setColumnValues(columnValues);
     handler.put(table, put);
-    List<TColumnValue> appendColumns = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> appendColumns = new ArrayList<>(1);
     appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
     TAppend append = new TAppend(wrap(rowName), appendColumns);
     handler.append(table, append);
@@ -503,14 +503,14 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testCheckAndPut".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesA = new ArrayList<>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     columnValuesA.add(columnValueA);
     TPut putA = new TPut(wrap(rowName), columnValuesA);
     putA.setColumnValues(columnValuesA);
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesB = new ArrayList<>(1);
     TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
     columnValuesB.add(columnValueB);
@@ -532,7 +532,7 @@ public class TestThriftHBaseServiceHandler {
     result = handler.get(table, get);
     assertArrayEquals(rowName, result.getRow());
     List<TColumnValue> returnedColumnValues = result.getColumnValues();
-    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> expectedColumnValues = new ArrayList<>(2);
     expectedColumnValues.add(columnValueA);
     expectedColumnValues.add(columnValueB);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
@@ -550,14 +550,14 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testCheckAndDelete".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesA = new ArrayList<>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     columnValuesA.add(columnValueA);
     TPut putA = new TPut(wrap(rowName), columnValuesA);
     putA.setColumnValues(columnValuesA);
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesB = new ArrayList<>(1);
     TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
     columnValuesB.add(columnValueB);
@@ -595,7 +595,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
@@ -604,7 +604,7 @@ public class TestThriftHBaseServiceHandler {
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -656,7 +656,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < numTrials; i++) {
       TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
@@ -665,7 +665,7 @@ public class TestThriftHBaseServiceHandler {
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -694,7 +694,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testReverseScan" + i).getBytes()), columnValues);
@@ -704,7 +704,7 @@ public class TestThriftHBaseServiceHandler {
     // create reverse scan instance
     TScan scan = new TScan();
     scan.setReversed(true);
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -743,7 +743,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testScanWithFilter" + i).getBytes()), columnValues);
@@ -752,7 +752,7 @@ public class TestThriftHBaseServiceHandler {
     // create scan instance with filter
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -792,7 +792,7 @@ public class TestThriftHBaseServiceHandler {
     ThriftHBaseServiceHandler handler = createHandler();
     byte[] rowName = "testPutTTL".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     // Add some dummy data
     columnValues.add(
@@ -856,7 +856,7 @@ public class TestThriftHBaseServiceHandler {
     ByteBuffer table = wrap(tableAname);
     // insert data
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(100);
+    List<TColumnValue> columnValues = new ArrayList<>(100);
     for (int i = 0; i < 100; i++) {
       String colNum = pad(i, (byte) 3);
       TColumnValue columnValue = new TColumnValue(wrap(familyAname),
@@ -868,7 +868,7 @@ public class TestThriftHBaseServiceHandler {
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     columns.add(column);
@@ -917,7 +917,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 20; i++) {
       TPut put =
@@ -927,7 +927,7 @@ public class TestThriftHBaseServiceHandler {
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -1002,7 +1002,7 @@ public class TestThriftHBaseServiceHandler {
     TGet get = new TGet(wrap(rowName));
     assertFalse(handler.exists(table, get));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -1144,7 +1144,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testAttribute".getBytes();
     byte[] attributeKey = "attribute1".getBytes();
     byte[] attributeValue = "value1".getBytes();
-    Map<ByteBuffer, ByteBuffer> attributes = new HashMap<ByteBuffer, ByteBuffer>();
+    Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
     attributes.put(wrap(attributeKey), wrap(attributeValue));
     TGet tGet = new TGet(wrap(rowName));
@@ -1152,7 +1152,7 @@ public class TestThriftHBaseServiceHandler {
     Get get = getFromThrift(tGet);
     assertArrayEquals(get.getAttribute("attribute1"), attributeValue);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     TPut tPut = new TPut(wrap(rowName) , columnValues);
     tPut.setAttributes(attributes);
@@ -1164,7 +1164,7 @@ public class TestThriftHBaseServiceHandler {
     Scan scan = scanFromThrift(tScan);
     assertArrayEquals(scan.getAttribute("attribute1"), attributeValue);
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
+    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
     tIncrement.setAttributes(attributes);
@@ -1189,7 +1189,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testMutateRow".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesA = new ArrayList<>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
     columnValuesA.add(columnValueA);
@@ -1203,11 +1203,11 @@ public class TestThriftHBaseServiceHandler {
     assertArrayEquals(rowName, result.getRow());
     List<TColumnValue> returnedColumnValues = result.getColumnValues();
-    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> expectedColumnValues = new ArrayList<>(1);
     expectedColumnValues.add(columnValueA);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesB = new ArrayList<>(1);
     TColumnValue columnValueB = new TColumnValue(wrap(familyAname), wrap(qualifierBname), wrap(valueBname));
     columnValuesB.add(columnValueB);
@@ -1215,13 +1215,13 @@ public class TestThriftHBaseServiceHandler {
     putB.setColumnValues(columnValuesB);
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
+    List<TColumn> deleteColumns = new ArrayList<>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
     delete.setColumns(deleteColumns);
-    List<TMutation> mutations = new ArrayList<TMutation>(2);
+    List<TMutation> mutations = new ArrayList<>(2);
     TMutation mutationA = TMutation.put(putB);
     mutations.add(mutationA);
@@ -1235,7 +1235,7 @@ public class TestThriftHBaseServiceHandler {
     assertArrayEquals(rowName, result.getRow());
     returnedColumnValues = result.getColumnValues();
-    expectedColumnValues = new ArrayList<TColumnValue>(1);
+    expectedColumnValues = new ArrayList<>(1);
     expectedColumnValues.add(columnValueB);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
   }
@@ -1250,10 +1250,10 @@ public class TestThriftHBaseServiceHandler {
   @Test
   public void testDurability() throws Exception {
     byte[] rowName = "testDurability".getBytes();
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
+    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TDelete tDelete = new TDelete(wrap(rowName));
@@ -1319,7 +1319,7 @@ public class TestThriftHBaseServiceHandler {
     ByteBuffer value = wrap(valueAname);
     // Create a mutation to write to 'B', our "mutate" of "checkAndMutate"
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesB = new ArrayList<>(1);
     TColumnValue columnValueB = new TColumnValue(family, wrap(qualifierBname), wrap(valueBname));
     columnValuesB.add(columnValueB);
     TPut putB = new TPut(row, columnValuesB);
@@ -1337,7 +1337,7 @@ public class TestThriftHBaseServiceHandler {
       handler.checkAndMutate(table, row, family, qualifier, TCompareOp.EQUAL, value,
       tRowMutations));
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValuesA = new ArrayList<>(1);
     TColumnValue columnValueA = new TColumnValue(family, qualifier, value);
     columnValuesA.add(columnValueA);
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
index f7299088c8e..d672ab49919 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
@@ -197,7 +197,7 @@ public void testScanWithVisibilityLabels() throws Exception {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
@@ -212,7 +212,7 @@ public void testScanWithVisibilityLabels() throws Exception {
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -222,7 +222,7 @@ public void testScanWithVisibilityLabels() throws Exception {
     scan.setStopRow("testScan\uffff".getBytes());
     TAuthorization tauth = new TAuthorization();
-    List<String> labels = new ArrayList<String>(2);
+    List<String> labels = new ArrayList<>(2);
     labels.add(SECRET);
     labels.add(PRIVATE);
     tauth.setLabels(labels);
@@ -265,7 +265,7 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 20; i++) {
       TPut put = new TPut(
@@ -282,7 +282,7 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>(1);
+    List<TColumn> columns = new ArrayList<>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -293,7 +293,7 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
     // get 5 rows and check the returned results
     scan.setStopRow("testGetScannerResults05".getBytes());
     TAuthorization tauth = new TAuthorization();
-    List<String> labels = new ArrayList<String>(2);
+    List<String> labels = new ArrayList<>(2);
     labels.add(SECRET);
     labels.add(PRIVATE);
     tauth.setLabels(labels);
@@ -321,7 +321,7 @@ public void testGetsWithLabels() throws Exception {
     byte[] rowName = "testPutGet".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
+    List<TColumnValue> columnValues = new ArrayList<>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname),
@@ -334,7 +334,7 @@ public void testGetsWithLabels() throws Exception {
     handler.put(table, put);
     TGet get = new TGet(wrap(rowName));
     TAuthorization tauth = new TAuthorization();
-    List<String> labels = new ArrayList<String>(2);
+    List<String> labels = new ArrayList<>(2);
     labels.add(SECRET);
     labels.add(PRIVATE);
     tauth.setLabels(labels);
@@ -351,7 +351,7 @@ public void testIncrementWithTags() throws Exception {
     byte[] rowName = "testIncrementWithTags".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -359,7 +359,7 @@ public void testIncrementWithTags() throws Exception {
     put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
     handler.put(table, put);
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
+    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
@@ -368,7 +368,7 @@ public void testIncrementWithTags() throws Exception {
     TGet get = new TGet(wrap(rowName));
     TAuthorization tauth = new TAuthorization();
-    List<String> labels = new ArrayList<String>(1);
+    List<String> labels = new ArrayList<>(1);
     labels.add(SECRET);
     tauth.setLabels(labels);
     get.setAuthorizations(tauth);
@@ -386,7 +386,7 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
     byte[] rowName = "testIncrementWithTagsWithNotMatchLabels".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -394,7 +394,7 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
     put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
     handler.put(table, put);
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
+    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
@@ -403,7 +403,7 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
     TGet get = new TGet(wrap(rowName));
     TAuthorization tauth = new TAuthorization();
-    List<String> labels = new ArrayList<String>(1);
+    List<String> labels = new ArrayList<>(1);
     labels.add(PUBLIC);
     tauth.setLabels(labels);
     get.setAuthorizations(tauth);
@@ -418,7 +418,7 @@ public void testAppend() throws Exception {
     ByteBuffer table = wrap(tableAname);
     byte[] v1 = Bytes.toBytes(1L);
     byte[] v2 = Bytes.toBytes(5L);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> columnValues = new ArrayList<>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(Bytes.toBytes(1L))));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -426,7 +426,7 @@ public void testAppend() throws Exception {
     put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
     handler.put(table, put);
-    List<TColumnValue> appendColumns = new ArrayList<TColumnValue>(1);
+    List<TColumnValue> appendColumns = new ArrayList<>(1);
     appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
     TAppend append = new TAppend(wrap(rowName), appendColumns);
@@ -435,7 +435,7 @@ public void testAppend() throws Exception {
     TGet get = new TGet(wrap(rowName));
     TAuthorization tauth = new TAuthorization();
-    List<String> labels = new ArrayList<String>(1);
+    List<String> labels = new ArrayList<>(1);
     labels.add(SECRET);
     tauth.setLabels(labels);
     get.setAuthorizations(tauth);