From 5303f4a66d0d97f49fab7ad534fd85bd9c33ca18 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Mon, 24 Mar 2014 23:45:55 +0000
Subject: [PATCH] HDFS-6124. Merging r1581124 from trunk to branch-2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1581137 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
 .../hadoop/hdfs/BlockMissingException.java | 4 +-
 .../apache/hadoop/hdfs/BlockReaderLocal.java | 10 ++--
 .../hadoop/hdfs/BlockReaderLocalLegacy.java | 4 +-
 .../hadoop/hdfs/CorruptFileBlockIterator.java | 2 +-
 .../org/apache/hadoop/hdfs/DFSClient.java | 8 +--
 .../hadoop/hdfs/DFSHedgedReadMetrics.java | 6 +--
 .../apache/hadoop/hdfs/DFSInputStream.java | 4 +-
 .../apache/hadoop/hdfs/DFSOutputStream.java | 16 +++---
 .../java/org/apache/hadoop/hdfs/DFSUtil.java | 6 +--
 .../hadoop/hdfs/DomainSocketFactory.java | 2 +-
 .../hadoop/hdfs/RemoteBlockReader2.java | 2 +-
 .../org/apache/hadoop/hdfs/StorageType.java | 2 +-
 .../hadoop/hdfs/client/ShortCircuitCache.java | 2 +-
 .../hadoop/hdfs/net/DomainPeerServer.java | 2 +-
 .../apache/hadoop/hdfs/net/TcpPeerServer.java | 2 +-
 .../hdfs/protocol/BlockListAsLongs.java | 4 +-
 .../hdfs/protocol/BlockLocalPathInfo.java | 2 +-
 .../hdfs/protocol/CorruptFileBlocks.java | 4 +-
 .../hdfs/protocol/DatanodeLocalInfo.java | 6 +--
 .../hadoop/hdfs/protocol/HdfsConstants.java | 6 +--
 .../hadoop/hdfs/protocol/HdfsFileStatus.java | 26 ++++-----
 .../hdfs/protocol/HdfsLocatedFileStatus.java | 2 +-
 .../hadoop/hdfs/protocol/LocatedBlock.java | 8 +--
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 6 +--
 .../hdfs/protocol/RollingUpgradeStatus.java | 2 +-
 .../hdfs/protocol/SnapshotDiffReport.java | 2 +-
 .../SnapshottableDirectoryStatus.java | 8 +--
 .../datatransfer/DataTransferEncryptor.java | 6 +--
 .../protocol/datatransfer/PacketReceiver.java | 2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 2 +-
 .../qjournal/client/IPCLoggerChannel.java | 2 +-
 .../hdfs/qjournal/protocol/RequestInfo.java | 4 +-
 .../hdfs/qjournal/server/JournalMetrics.java | 2 +-
 .../hdfs/qjournal/server/JournalNode.java | 2 +-
 .../server/JournalNodeHttpServer.java | 2 +-
 .../qjournal/server/JournalNodeRpcServer.java | 2 +-
 .../token/block/BlockTokenIdentifier.java | 2 +-
 .../token/block/BlockTokenSecretManager.java | 6 +--
 .../token/block/ExportedBlockKeys.java | 2 +-
 .../hadoop/hdfs/server/balancer/Balancer.java | 30 +++++------
 .../hdfs/server/balancer/BalancingPolicy.java | 4 +-
 .../server/blockmanagement/BlockManager.java | 18 +++----
 .../server/blockmanagement/BlocksMap.java | 2 +-
 .../blockmanagement/CorruptReplicasMap.java | 2 +-
 .../blockmanagement/DatanodeDescriptor.java | 8 +--
 .../server/blockmanagement/Host2NodesMap.java | 4 +-
 .../PendingDataNodeMessages.java | 2 +-
 .../PendingReplicationBlocks.java | 8 +--
 .../UnderReplicatedBlocks.java | 4 +-
 .../server/common/HdfsServerConstants.java | 10 ++--
 .../hadoop/hdfs/server/common/Storage.java | 4 +-
 .../server/common/UpgradeStatusReport.java | 6 +--
 .../hdfs/server/datanode/BPOfferService.java | 2 +-
 .../hdfs/server/datanode/BPServiceActor.java | 4 +-
 .../server/datanode/BlockMetadataHeader.java | 2 +-
 .../datanode/BlockPoolSliceScanner.java | 2 +-
 .../hdfs/server/datanode/BlockReceiver.java | 4 +-
 .../hadoop/hdfs/server/datanode/DataNode.java | 8 +--
 .../hdfs/server/datanode/DataStorage.java | 2 +-
 .../server/datanode/DataXceiverServer.java | 4 +-
 .../server/datanode/DirectoryScanner.java | 8 +--
 .../AvailableSpaceVolumeChoosingPolicy.java | 6 +--
 .../fsdataset/impl/BlockPoolSlice.java | 2 +-
 .../fsdataset/impl/FsDatasetCache.java | 6 +--
 .../datanode/fsdataset/impl/ReplicaMap.java | 2 +-
 .../fsdataset/impl/RollingLogsImpl.java | 2 +-
 .../datanode/metrics/DataNodeMetrics.java | 10 ++--
 .../hdfs/server/namenode/BackupImage.java | 2 +-
 .../hdfs/server/namenode/CachePool.java | 2 +-
 .../hdfs/server/namenode/Checkpointer.java | 2 +-
 .../namenode/EditLogBackupInputStream.java | 4 +-
 .../namenode/EditLogBackupOutputStream.java | 4 +-
 .../namenode/EditLogFileOutputStream.java | 4 +-
 .../server/namenode/EditsDoubleBuffer.java | 2 +-
 .../hdfs/server/namenode/FSDirectory.java | 4 +-
 .../hdfs/server/namenode/FSEditLog.java | 2 +-
 .../hdfs/server/namenode/FSEditLogOp.java | 2 +-
 .../server/namenode/FSEditLogOpCodes.java | 2 +-
 .../hadoop/hdfs/server/namenode/FSImage.java | 2 +-
 .../hdfs/server/namenode/FSImageFormat.java | 2 +-
 ...ImagePreTransactionalStorageInspector.java | 6 +--
 .../FSImageTransactionalStorageInspector.java | 2 +-
 .../hdfs/server/namenode/FSNamesystem.java | 14 ++---
 .../server/namenode/FSNamesystemLock.java | 2 +-
 .../hdfs/server/namenode/HostFileManager.java | 2 +-
 .../hadoop/hdfs/server/namenode/INode.java | 2 +-
 .../hdfs/server/namenode/ImageServlet.java | 2 +-
 .../hdfs/server/namenode/JournalSet.java | 2 +-
 .../hdfs/server/namenode/LeaseManager.java | 6 +--
 .../hdfs/server/namenode/NNStorage.java | 2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java | 4 +-
 .../server/namenode/NameNodeHttpServer.java | 2 +-
 .../namenode/NameNodeResourceChecker.java | 2 +-
 .../hdfs/server/namenode/NamenodeFsck.java | 6 +--
 .../server/namenode/SaveNamespaceContext.java | 2 +-
 .../server/namenode/SecondaryNameNode.java | 4 +-
 .../server/namenode/SerialNumberManager.java | 10 ++--
 .../hdfs/server/namenode/TransferFsImage.java | 4 +-
 .../ha/ConfiguredFailoverProxyProvider.java | 2 +-
 .../server/namenode/ha/EditLogTailer.java | 4 +-
 .../namenode/ha/StandbyCheckpointer.java | 2 +-
 .../namenode/metrics/NameNodeMetrics.java | 6 +--
 .../startupprogress/PhaseTracking.java | 2 +-
 .../startupprogress/StartupProgress.java | 2 +-
 .../protocol/BalancerBandwidthCommand.java | 2 +-
 .../server/protocol/BlockRecoveryCommand.java | 4 +-
 .../server/protocol/BlocksWithLocations.java | 2 +-
 .../server/protocol/CheckpointCommand.java | 4 +-
 .../server/protocol/DatanodeRegistration.java | 4 +-
 .../server/protocol/HeartbeatResponse.java | 6 +--
 .../server/protocol/KeyUpdateCommand.java | 2 +-
 .../server/protocol/NNHAStatusHeartbeat.java | 2 +-
 .../server/protocol/NamenodeRegistration.java | 6 +--
 .../hdfs/server/protocol/NamespaceInfo.java | 2 +-
 .../server/protocol/ReplicaRecoveryInfo.java | 2 +-
 .../hdfs/server/protocol/ServerCommand.java | 2 +-
 .../StorageReceivedDeletedBlocks.java | 2 +-
 .../apache/hadoop/hdfs/tools/CacheAdmin.java | 2 +-
 .../hdfs/tools/DFSZKFailoverController.java | 2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java | 2 +-
 .../hadoop/hdfs/tools/TableListing.java | 4 +-
 .../OfflineEditsBinaryLoader.java | 4 +-
 .../offlineEditsViewer/TeeOutputStream.java | 2 +-
 .../offlineEditsViewer/XmlEditsVisitor.java | 2 +-
 .../FileDistributionCalculator.java | 2 +-
 .../ImageLoaderCurrent.java | 2 +-
 .../tools/offlineImageViewer/LsrPBImage.java | 6 +--
 .../NameDistributionVisitor.java | 2 +-
 .../hadoop/hdfs/util/BestEffortLongFile.java | 2 +-
 .../hdfs/util/DataTransferThrottler.java | 4 +-
 .../hadoop/hdfs/util/DirectBufferPool.java | 2 +-
 .../hadoop/hdfs/util/LightWeightHashSet.java | 2 +-
 .../org/apache/hadoop/hdfs/util/XMLUtils.java | 4 +-
 .../hadoop/hdfs/web/ByteRangeInputStream.java | 4 +-
 .../hadoop/hdfs/web/HftpFileSystem.java | 2 +-
 .../web/resources/AclPermissionParam.java | 2 +-
 .../fs/TestEnhancedByteBufferAccess.java | 2 +-
 .../apache/hadoop/fs/TestFcHdfsSetUMask.java | 2 +-
 .../org/apache/hadoop/fs/TestGlobPaths.java | 2 +-
 .../fs/TestHDFSFileContextMainOperations.java | 2 +-
 .../hadoop/fs/TestResolveHdfsSymlink.java | 2 +-
 .../hadoop/fs/TestUrlStreamHandler.java | 2 +-
 .../hadoop/fs/permission/TestStickyBit.java | 4 +-
 .../viewfs/TestViewFileSystemAtHdfsRoot.java | 2 +-
 .../fs/viewfs/TestViewFileSystemHdfs.java | 2 +-
 .../fs/viewfs/TestViewFsAtHdfsRoot.java | 2 +-
 .../fs/viewfs/TestViewFsFileStatusHdfs.java | 2 +-
 .../hadoop/fs/viewfs/TestViewFsHdfs.java | 2 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 10 ++--
 .../apache/hadoop/hdfs/MiniDFSCluster.java | 10 ++--
 .../apache/hadoop/hdfs/MiniDFSNNTopology.java | 2 +-
 .../hadoop/hdfs/TestClientReportBadBlock.java | 2 +-
 .../hadoop/hdfs/TestDFSClientFailover.java | 4 +-
 .../hadoop/hdfs/TestDFSClientRetries.java | 14 ++---
 .../org/apache/hadoop/hdfs/TestDFSMkdirs.java | 2 +-
 .../apache/hadoop/hdfs/TestDFSPermission.java | 32 +++++------
 .../org/apache/hadoop/hdfs/TestDFSShell.java | 4 +-
 .../hdfs/TestDFSStorageStateRecovery.java | 2 +-
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 4 +-
 .../hdfs/TestDataTransferKeepalive.java | 4 +-
 .../hadoop/hdfs/TestDataTransferProtocol.java | 6 +--
 .../hadoop/hdfs/TestDatanodeBlockScanner.java | 4 +-
 .../apache/hadoop/hdfs/TestDatanodeDeath.java | 16 +++---
 .../apache/hadoop/hdfs/TestDecommission.java | 2 +-
 .../hadoop/hdfs/TestFSInputChecker.java | 4 +-
 .../hadoop/hdfs/TestFSOutputSummer.java | 4 +-
 .../apache/hadoop/hdfs/TestFetchImage.java | 2 +-
 .../apache/hadoop/hdfs/TestFileAppend.java | 2 +-
 .../apache/hadoop/hdfs/TestFileAppend2.java | 14 ++---
 .../apache/hadoop/hdfs/TestFileAppend4.java | 2 +-
 .../org/apache/hadoop/hdfs/TestHdfsAdmin.java | 2 +-
 .../TestInjectionForSimulatedStorage.java | 10 ++--
 .../hadoop/hdfs/TestIsMethodSupported.java | 2 +-
 .../org/apache/hadoop/hdfs/TestLease.java | 2 +-
 .../hadoop/hdfs/TestLeaseRecovery2.java | 6 +--
 .../apache/hadoop/hdfs/TestLeaseRenewer.java | 6 +--
 .../hadoop/hdfs/TestListPathServlet.java | 4 +-
 .../org/apache/hadoop/hdfs/TestPeerCache.java | 2 +-
 .../org/apache/hadoop/hdfs/TestPipelines.java | 4 +-
 .../hadoop/hdfs/TestShortCircuitCache.java | 4 +-
 .../hdfs/TestShortCircuitLocalRead.java | 2 +-
 .../org/apache/hadoop/hdfs/TestWriteRead.java | 2 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java | 6 +--
 .../datatransfer/TestPacketReceiver.java | 4 +-
 .../hdfs/qjournal/MiniJournalCluster.java | 10 ++--
 .../hdfs/qjournal/MiniQJMHACluster.java | 2 +-
 .../hadoop/hdfs/qjournal/TestNNWithQJM.java | 6 +--
 .../qjournal/client/TestEpochsAreUnique.java | 4 +-
 .../qjournal/client/TestIPCLoggerChannel.java | 4 +-
 .../qjournal/client/TestQJMWithFaults.java | 8 +--
 .../client/TestQuorumJournalManager.java | 2 +-
 .../client/TestQuorumJournalManagerUnit.java | 2 +-
 .../hdfs/qjournal/server/TestJournal.java | 2 +-
 .../hdfs/qjournal/server/TestJournalNode.java | 4 +-
 ...TestClientProtocolWithDelegationToken.java | 2 +-
 .../security/token/block/TestBlockToken.java | 16 +++---
 .../TestBalancerWithEncryptedTransfer.java | 2 +-
 .../TestCorruptReplicaInfo.java | 2 +-
 .../blockmanagement/TestHost2NodesMap.java | 2 +-
 .../TestPendingDataNodeMessages.java | 2 +-
 .../TestRBWBlockInvalidation.java | 2 +-
 .../TestReplicationPolicy.java | 4 +-
 .../hdfs/server/common/TestJspHelper.java | 2 +-
 .../server/datanode/BlockReportTestBase.java | 4 +-
 .../server/datanode/SimulatedFSDataset.java | 6 +--
 .../server/datanode/TestBPOfferService.java | 4 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java | 2 +-
 .../server/datanode/TestBlockPoolManager.java | 6 +--
 .../datanode/TestBlockPoolSliceStorage.java | 2 +-
 .../server/datanode/TestCachingStrategy.java | 4 +-
 .../server/datanode/TestDataNodeExit.java | 2 +-
 .../datanode/TestDataNodeVolumeFailure.java | 8 +--
 .../server/datanode/TestDirectoryScanner.java | 4 +-
 .../server/datanode/TestFsDatasetCache.java | 2 +-
 .../datanode/TestIncrementalBrVariations.java | 2 +-
 .../TestMultipleNNDataBlockScanner.java | 4 +-
 .../datanode/TestReadOnlySharedStorage.java | 4 +-
 .../server/datanode/TestRefreshNamenodes.java | 8 +--
 .../server/datanode/TestStorageReport.java | 2 +-
 .../impl/TestInterDatanodeProtocol.java | 2 +-
 .../hdfs/server/namenode/CreateEditsLog.java | 2 +-
 .../server/namenode/FileNameGenerator.java | 6 +--
 .../namenode/NNThroughputBenchmark.java | 12 ++---
 .../namenode/OfflineEditsViewerHelper.java | 4 +-
 .../hdfs/server/namenode/TestAuditLogs.java | 2 +-
 .../hdfs/server/namenode/TestCheckpoint.java | 2 +-
 .../namenode/TestDecommissioningStatus.java | 2 +-
 .../hdfs/server/namenode/TestEditLog.java | 14 ++---
 .../hdfs/server/namenode/TestEditLogRace.java | 6 +--
 .../namenode/TestFSImageWithSnapshot.java | 2 +-
 .../hdfs/server/namenode/TestFsLimits.java | 2 +-
 .../hdfs/server/namenode/TestHDFSConcat.java | 4 +-
 .../server/namenode/TestLeaseManager.java | 2 +-
 .../namenode/TestListCorruptFileBlocks.java | 2 +-
 .../TestNNStorageRetentionFunctional.java | 4 +-
 .../TestNNStorageRetentionManager.java | 12 ++---
 .../server/namenode/TestNameEditsConfigs.java | 2 +-
 .../namenode/TestNameNodeJspHelper.java | 2 +-
 .../server/namenode/TestNameNodeRecovery.java | 8 +--
 .../TestNameNodeRetryCacheMetrics.java | 2 +-
 .../namenode/TestNamenodeRetryCache.java | 2 +-
 .../server/namenode/TestSecondaryWebUi.java | 2 +-
 .../namenode/TestSecurityTokenEditLog.java | 4 +-
 .../hdfs/server/namenode/TestStreamFile.java | 12 ++---
 .../namenode/ha/HAStressTestHarness.java | 4 +-
 .../hdfs/server/namenode/ha/HATestUtil.java | 2 +-
 .../ha/TestDelegationTokensWithHA.java | 2 +-
 .../namenode/ha/TestHAConfiguration.java | 2 +-
 .../namenode/ha/TestRetryCacheWithHA.java | 54 +++++++++----------
 .../TestNNMetricFilesInGetListingOps.java | 2 +-
 .../namenode/metrics/TestNameNodeMetrics.java | 2 +-
 .../TestCheckpointsWithSnapshots.java | 2 +-
 .../snapshot/TestNestedSnapshots.java | 4 +-
 .../snapshot/TestOpenFilesWithSnapshot.java | 2 +-
 .../snapshot/TestRenameWithSnapshots.java | 4 +-
 .../namenode/snapshot/TestSnapshot.java | 4 +-
 .../snapshot/TestSnapshotDiffReport.java | 2 +-
 .../snapshot/TestSnapshotFileLength.java | 2 +-
 ...TestSnapshotNameWithInvalidCharacters.java | 2 +-
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java | 18 +++---
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java | 2 +-
 .../TestOfflineEditsViewer.java | 2 +-
 .../TestDelimitedImageVisitor.java | 2 +-
 .../hdfs/util/TestAtomicFileOutputStream.java | 4 +-
 .../hdfs/util/TestDirectBufferPool.java | 2 +-
 .../hdfs/util/TestLightWeightHashSet.java | 2 +-
 .../hdfs/util/TestLightWeightLinkedSet.java | 2 +-
 .../hadoop/hdfs/web/TestHftpFileSystem.java | 2 +-
 .../hadoop/hdfs/web/TestTokenAspect.java | 2 +-
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java | 2 +-
 .../security/TestRefreshUserMappings.java | 2 +-
 .../apache/hadoop/test/HdfsTestDriver.java | 2 +-
 .../tools/TestDelegationTokenFetcher.java | 2 +-
 .../TestDelegationTokenRemoteFetcher.java | 2 +-
 275 files changed, 597 insertions(+), 594 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index aab51b2850c..798b3ed3cf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -196,6 +196,9 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-5138. Support HDFS upgrade in HA. (atm via todd)
 
+    HDFS-6124. Add final modifier to class members. (Suresh Srinivas via
+    Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
index a03d6970db2..7bba8a4af60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockMissingException.java
@@ -33,8 +33,8 @@ public class BlockMissingException extends IOException {
 
   private static final long serialVersionUID = 1L;
 
-  private String filename;
-  private long offset;
+  private final String filename;
+  private final long offset;
 
   /**
    * An exception that indicates that file was corrupted.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 60cb23f480c..ca6c041c1df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -56,10 +56,10 @@ import com.google.common.base.Preconditions;
 class BlockReaderLocal implements BlockReader {
   static final Log LOG = LogFactory.getLog(BlockReaderLocal.class);
 
-  private static DirectBufferPool bufferPool = new DirectBufferPool();
+  private static final DirectBufferPool bufferPool = new DirectBufferPool();
 
   public static class Builder {
-    private int bufferSize;
+    private final int bufferSize;
     private boolean verifyChecksum;
     private int maxReadahead;
     private String filename;
@@ -160,12 +160,12 @@ class BlockReaderLocal implements BlockReader {
   /**
    * Cache of Checksum#bytesPerChecksum.
    */
-  private int bytesPerChecksum;
+  private final int bytesPerChecksum;
 
   /**
    * Cache of Checksum#checksumSize.
   */
-  private int checksumSize;
+  private final int checksumSize;
 
   /**
    * Maximum number of chunks to allocate.
@@ -191,7 +191,7 @@ class BlockReaderLocal implements BlockReader {
    * The rationale is that allocating a lot of buffers of different sizes would
    * make it very difficult for the DirectBufferPool to re-use buffers.
    */
-  private int maxReadaheadLength;
+  private final int maxReadaheadLength;
 
   /**
    * Buffers data starting at the current dataPos and extending on
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index f9c498d2cdf..3665d802566 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -137,7 +137,7 @@ class BlockReaderLocalLegacy implements BlockReader {
 
   // Multiple datanodes could be running on the local machine. Store proxies in
   // a map keyed by the ipc port of the datanode.
-  private static Map localDatanodeInfoMap = new HashMap();
+  private static final Map localDatanodeInfoMap = new HashMap();
 
   private final FileInputStream dataIn; // reader for the data file
   private final FileInputStream checksumIn; // reader for the checksum file
@@ -162,7 +162,7 @@ class BlockReaderLocalLegacy implements BlockReader {
   private DataChecksum checksum;
   private final boolean verifyChecksum;
 
-  private static DirectBufferPool bufferPool = new DirectBufferPool();
+  private static final DirectBufferPool bufferPool = new DirectBufferPool();
 
   private final int bytesPerChecksum;
   private final int checksumSize;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
index bd3fdd56a10..9d6394c9639 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.RemoteIterator;
  */
 public class CorruptFileBlockIterator implements RemoteIterator {
   private final DFSClient dfs;
-  private String path;
+  private final String path;
 
   private String[] files = null;
   private int fileIdx = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index fb59e827e44..7f71f308bfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -223,18 +223,18 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
   private volatile FsServerDefaults serverDefaults;
   private volatile long serverDefaultsLastUpdate;
   final String clientName;
-  SocketFactory socketFactory;
+  final SocketFactory socketFactory;
   final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
   final FileSystem.Statistics stats;
   private final String authority;
-  private Random r = new Random();
+  private final Random r = new Random();
   private SocketAddress[] localInterfaceAddrs;
   private DataEncryptionKey encryptionKey;
   private final CachingStrategy defaultReadCachingStrategy;
   private final CachingStrategy defaultWriteCachingStrategy;
   private final ClientContext clientContext;
   private volatile long hedgedReadThresholdMillis;
-  private static DFSHedgedReadMetrics HEDGED_READ_METRIC =
+  private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
       new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
@@ -953,7 +953,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
     }
   }
 
-  private static Map localAddrMap = Collections
+  private static final Map localAddrMap = Collections
       .synchronizedMap(new HashMap());
 
   static boolean isLocalAddress(InetSocketAddress targetAddr) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
index 2ce05f6e8c9..e7a51129b78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
@@ -25,9 +25,9 @@ import java.util.concurrent.atomic.AtomicLong;
  * we can grab them from client side, like HBase.
  */
 public class DFSHedgedReadMetrics {
-  public AtomicLong hedgedReadOps = new AtomicLong();
-  public AtomicLong hedgedReadOpsWin = new AtomicLong();
-  public AtomicLong hedgedReadOpsInCurThread = new AtomicLong();
+  public final AtomicLong hedgedReadOps = new AtomicLong();
+  public final AtomicLong hedgedReadOpsWin = new AtomicLong();
+  public final AtomicLong hedgedReadOpsInCurThread = new AtomicLong();
 
   public void incHedgedReadOps() {
     hedgedReadOps.incrementAndGet();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 802bf802e4a..2d42139d7d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1508,8 +1508,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 
   /** Utility class to encapsulate data node info and its address. */
   static class DNAddrPair {
-    DatanodeInfo info;
-    InetSocketAddress addr;
+    final DatanodeInfo info;
+    final InetSocketAddress addr;
     DNAddrPair(DatanodeInfo info, InetSocketAddress addr) {
       this.info = info;
       this.addr = addr;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 12735a886fb..8e38b6c3e18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -148,21 +148,21 @@ public class DFSOutputStream extends FSOutputSummer
   private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
   private volatile boolean appendChunk = false; // appending to existing partial block
   private long initialFileSize = 0; // at time of file open
-  private Progressable progress;
+  private final Progressable progress;
   private final short blockReplication; // replication factor of file
   private boolean shouldSyncBlock = false; // force blocks to disk upon close
-  private AtomicReference cachingStrategy;
+  private final AtomicReference cachingStrategy;
   private boolean failPacket = false;
 
   private class Packet {
-    long seqno; // sequencenumber of buffer in block
-    long offsetInBlock; // offset in block
+    final long seqno; // sequencenumber of buffer in block
+    final long offsetInBlock; // offset in block
     private boolean lastPacketInBlock; // is this the last packet in block?
     boolean syncBlock; // this packet forces the current block to disk
-    int numChunks; // number of chunks currently in packet
-    int maxChunks; // max chunks in packet
+    int numChunks; // number of chunks currently in packet
+    final int maxChunks; // max chunks in packet
 
-    byte[] buf;
+    final byte[] buf;
 
     /**
      * buf is pointed into like follows:
@@ -323,7 +323,7 @@ public class DFSOutputStream extends FSOutputSummer
     private ResponseProcessor response = null;
     private volatile DatanodeInfo[] nodes = null; // list of targets for current block
     private volatile String[] storageIDs = null;
-    private LoadingCache excludedNodes =
+    private final LoadingCache excludedNodes =
         CacheBuilder.newBuilder()
         .expireAfterWrite(
             dfsClient.getConf().excludedNodesCacheExpiry,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 76f38b5e6cf..827763e6faa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -184,7 +184,7 @@ public class DFSUtil {
    */
   @InterfaceAudience.Private
   public static class DecomStaleComparator implements Comparator {
-    private long staleInterval;
+    private final long staleInterval;
 
     /**
      * Constructor of DecomStaleComparator
@@ -1417,8 +1417,8 @@ public class DFSUtil {
     }
   }
 
-  public static Options helpOptions = new Options();
-  public static Option helpOpt = new Option("h", "help", false,
+  public static final Options helpOptions = new Options();
+  public static final Option helpOpt = new Option("h", "help", false,
       "get help information");
 
   static {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DomainSocketFactory.java
index 0d57a63d0ff..1bd4b424474 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DomainSocketFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DomainSocketFactory.java
@@ -86,7 +86,7 @@ class DomainSocketFactory {
   /**
    * Information about domain socket paths.
    */
-  Cache pathMap =
+  final Cache pathMap =
       CacheBuilder.newBuilder()
       .expireAfterWrite(10, TimeUnit.MINUTES)
       .build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index 885671632a2..9cb975d92b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -91,7 +91,7 @@ public class RemoteBlockReader2 implements BlockReader {
   private final ReadableByteChannel in;
   private DataChecksum checksum;
 
-  private PacketReceiver packetReceiver = new PacketReceiver(true);
+  private final PacketReceiver packetReceiver = new PacketReceiver(true);
 
   private ByteBuffer curDataSlice = null;
 
   /** offset in block of the last chunk received */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
index 3f250f86fa5..408f678d650 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
@@ -31,5 +31,5 @@ public enum StorageType {
   DISK,
   SSD;
 
-  public static StorageType DEFAULT = DISK;
+  public static final StorageType DEFAULT = DISK;
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
index 97b6a4f4911..e4e82262588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
@@ -289,7 +289,7 @@ public class ShortCircuitCache implements Closeable {
    * Maximum total size of the cache, including both mmapped and
    * non-mmapped elements.
    */
-  private int maxTotalSize;
+  private final int maxTotalSize;
 
   /**
    * Non-mmaped elements older than this will be closed.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
index dce64621482..e7de0d7a5aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class DomainPeerServer implements PeerServer {
-  static Log LOG = LogFactory.getLog(DomainPeerServer.class);
+  static final Log LOG = LogFactory.getLog(DomainPeerServer.class);
   private final DomainSocket sock;
 
   DomainPeerServer(DomainSocket sock) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
index 29d86634f29..ddef592ec7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.ipc.Server;
 
 @InterfaceAudience.Private
 public class TcpPeerServer implements PeerServer {
-  static Log LOG = LogFactory.getLog(TcpPeerServer.class);
+  static final Log LOG = LogFactory.getLog(TcpPeerServer.class);
 
   private final ServerSocket serverSocket;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index a7f5ff16f75..8a0b7316c83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -77,7 +77,7 @@ public class BlockListAsLongs implements Iterable {
         + (blockIndex - finalizedSize) * LONGS_PER_UC_BLOCK;
   }
 
-  private long[] blockList;
+  private final long[] blockList;
 
   /**
    * Create block report from finalized and under construction lists of blocks.
@@ -141,7 +141,7 @@ public class BlockListAsLongs implements Iterable {
   @InterfaceStability.Evolving
   public class BlockReportIterator implements Iterator {
     private int currentBlockIndex;
-    private Block block;
+    private final Block block;
     private ReplicaState currentReplicaState;
 
     BlockReportIterator() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
index 6bb850b952a..69fa52da318 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class BlockLocalPathInfo {
-  private ExtendedBlock block;
+  private final ExtendedBlock block;
   private String localBlockPath = ""; // local file storing the data
   private String localMetaPath = ""; // local file storing the checksum
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
index 16c7656aaf6..60657861473 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java
@@ -28,8 +28,8 @@ public class CorruptFileBlocks {
   // used for hashCode
   private static final int PRIME = 16777619;
 
-  private String[] files;
-  private String cookie;
+  private final String[] files;
+  private final String cookie;
 
   public CorruptFileBlocks() {
     this(new String[0], "");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
index 3ee235a3edc..b7b2289dd74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeLocalInfo {
-  private String softwareVersion;
-  private String configVersion;
-  private long uptime; // datanode uptime in seconds.
+  private final String softwareVersion;
+  private final String configVersion;
+  private final long uptime; // datanode uptime in seconds.
 
   public DatanodeLocalInfo(String softwareVersion,
       String configVersion, long uptime) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 2b1b13e75f6..7e67760f1c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -50,7 +50,7 @@ public class HdfsConstants {
       "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
 
-  public static int MIN_BLOCKS_FOR_WRITE = 5;
+  public static final int MIN_BLOCKS_FOR_WRITE = 5;
 
   // Long that indicates "leave current quota unchanged"
   public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
@@ -67,8 +67,8 @@ public class HdfsConstants {
   // HADOOP-438
   // Currently we set the maximum length to 8k characters and the maximum depth
   // to 1k.
-  public static int MAX_PATH_LENGTH = 8000;
-  public static int MAX_PATH_DEPTH = 1000;
+  public static final int MAX_PATH_LENGTH = 8000;
+  public static final int MAX_PATH_DEPTH = 1000;
 
   // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index ebc6f5c6592..66c56faa14b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -32,21 +32,21 @@ import org.apache.hadoop.hdfs.DFSUtil;
 @InterfaceStability.Evolving
 public class HdfsFileStatus {
 
-  private byte[] path; // local name of the inode that's encoded in java UTF8
-  private byte[] symlink; // symlink target encoded in java UTF8 or null
-  private long length;
-  private boolean isdir;
-  private short block_replication;
-  private long blocksize;
-  private long modification_time;
-  private long access_time;
-  private FsPermission permission;
-  private String owner;
-  private String group;
-  private long fileId;
+  private final byte[] path; // local name of the inode that's encoded in java UTF8
+  private final byte[] symlink; // symlink target encoded in java UTF8 or null
+  private final long length;
+  private final boolean isdir;
+  private final short block_replication;
+  private final long blocksize;
+  private final long modification_time;
+  private final long access_time;
+  private final FsPermission permission;
+  private final String owner;
+  private final String group;
+  private final long fileId;
 
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
-  private int childrenNum;
+  private final int childrenNum;
 
   public static final byte[] EMPTY_NAME = new byte[0];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 401f5e3ab6e..0f90e435a43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class HdfsLocatedFileStatus extends HdfsFileStatus {
-  private LocatedBlocks locations;
+  private final LocatedBlocks locations;
 
   /**
    * Constructor
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index bb98db46f5a..925db17b8b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -39,13 +39,13 @@ import com.google.common.collect.Lists;
 @InterfaceStability.Evolving
 public class LocatedBlock {
 
-  private ExtendedBlock b;
+  private final ExtendedBlock b;
   private long offset; // offset of the first byte of the block in the file
-  private DatanodeInfo[] locs;
+  private final DatanodeInfo[] locs;
   /** Storage ID for each replica */
-  private String[] storageIDs;
+  private final String[] storageIDs;
   // Storage type for each replica, if reported.
-  private StorageType[] storageTypes;
+  private final StorageType[] storageTypes;
   // corrupt flag is true if all of the replicas of a block are corrupt.
   // else false. If block has few corrupt replicas, they are filtered and
   // their locations are not part of this object
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index a43308e49c4..bac0e6a35b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class LocatedBlocks {
-  private long fileLength;
-  private List blocks; // array of blocks with prioritized locations
-  private boolean underConstruction;
+  private final long fileLength;
+  private final List blocks; // array of blocks with prioritized locations
+  private final boolean underConstruction;
   private LocatedBlock lastLocatedBlock = null;
   private boolean isLastBlockComplete = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
index eb37c8b7df4..9925920250b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class RollingUpgradeStatus {
-  private String blockPoolId;
+  private final String blockPoolId;
 
   public RollingUpgradeStatus(String blockPoolId) {
     this.blockPoolId = blockPoolId;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
index e0343a02f58..265a05d08e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
@@ -47,7 +47,7 @@ public class SnapshotDiffReport {
     DELETE("-"),
     RENAME("R");
 
-    private String label;
+    private final String label;
 
     private DiffType(String label) {
       this.label = label;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index ef0f894baf3..3deecbf42da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -44,16 +44,16 @@ public class SnapshottableDirectoryStatus {
   };
 
   /** Basic information of the snapshottable directory */
-  private HdfsFileStatus dirStatus;
+  private final HdfsFileStatus dirStatus;
 
   /** Number of snapshots that have been taken*/
-  private int snapshotNumber;
+  private final int snapshotNumber;
 
   /** Number of snapshots allowed. */
-  private int snapshotQuota;
+  private final int snapshotQuota;
 
   /** Full path of the parent. */
-  private byte[] parentFullPath;
+  private final byte[] parentFullPath;
 
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] localName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
index f84bdf38ead..e069a391b8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferEncryptor.java
@@ -299,7 +299,7 @@ public class DataTransferEncryptor {
    */
   private static class SaslServerCallbackHandler implements CallbackHandler {
 
-    private BlockPoolTokenSecretManager blockPoolTokenSecretManager;
+    private final BlockPoolTokenSecretManager blockPoolTokenSecretManager;
 
     public SaslServerCallbackHandler(BlockPoolTokenSecretManager
         blockPoolTokenSecretManager) {
@@ -347,8 +347,8 @@ public class DataTransferEncryptor {
    */
   private static class SaslClientCallbackHandler implements CallbackHandler {
 
-    private byte[] encryptionKey;
-    private String userName;
+    private final byte[] encryptionKey;
+    private final String userName;
 
     public SaslClientCallbackHandler(byte[] encryptionKey, String userName) {
       this.encryptionKey = encryptionKey;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
index cc2d17974a4..3503554636a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -47,7 +47,7 @@ public class PacketReceiver implements Closeable {
    */
   private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
 
-  static Log LOG = LogFactory.getLog(PacketReceiver.class);
+  static final Log LOG = LogFactory.getLog(PacketReceiver.class);
 
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
 
   private final boolean useDirectBuffers;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index f10c662a486..579d929c0e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -1078,7 +1078,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
   private static class BatchedCacheEntries
       implements BatchedEntries {
-    private ListCacheDirectivesResponseProto response;
+    private final ListCacheDirectivesResponseProto response;
 
     BatchedCacheEntries(
         ListCacheDirectivesResponseProto response) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 996d702572b..8588de50597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -133,7 +133,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   /**
    * Stopwatch which starts counting on each heartbeat that is sent
   */
-  private Stopwatch lastHeartbeatStopwatch = new Stopwatch();
+  private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
 
   private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
index b2167a5e41e..dfd1e4de26e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
@@ -22,10 +22,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 @InterfaceAudience.Private
 public class RequestInfo {
-  private String jid;
+  private final String jid;
   private long epoch;
   private long ipcSerialNumber;
-  private long committedTxId;
+  private final long committedTxId;
 
   public RequestInfo(String jid, long epoch, long ipcSerialNumber,
       long committedTxId) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
index 7bbee5b5ac1..40c0bff4f3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
@@ -52,7 +52,7 @@ class JournalMetrics {
       60*60 // 1h
   };
 
-  MutableQuantiles[] syncsQuantiles;
+  final MutableQuantiles[] syncsQuantiles;
 
   private final Journal journal;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 19c48ba9ed7..60908501faf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -63,7 +63,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private Configuration conf;
   private JournalNodeRpcServer rpcServer;
   private JournalNodeHttpServer httpServer;
-  private Map journalsById = Maps.newHashMap();
+  private final Map journalsById = Maps.newHashMap();
   private ObjectName journalNodeInfoBeanName;
   private String httpServerURI;
   private File localDir;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
index f58de600adb..97f2190c4b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
@@ -39,7 +39,7 @@ public class JournalNodeHttpServer {
   public static final String JN_ATTRIBUTE_KEY = "localjournal";
JN_ATTRIBUTE_KEY = "localjournal"; private HttpServer2 httpServer; - private JournalNode localJournalNode; + private final JournalNode localJournalNode; private final Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index aef0c19063d..0665f3d035f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -50,7 +50,7 @@ import com.google.protobuf.BlockingService; class JournalNodeRpcServer implements QJournalProtocol { private static final int HANDLER_COUNT = 5; - private JournalNode jn; + private final JournalNode jn; private Server server; JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java index ac6adfefb63..67b1fe95389 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java @@ -40,7 +40,7 @@ public class BlockTokenIdentifier extends TokenIdentifier { private String userId; private String blockPoolId; private long blockId; - private EnumSet modes; + private final EnumSet modes; private byte [] cache; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java index 954f1698589..ebd3e7c94c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java @@ -74,11 +74,11 @@ public class BlockTokenSecretManager extends private int serialNo; private BlockKey currentKey; private BlockKey nextKey; - private Map allKeys; + private final Map allKeys; private String blockPoolId; - private String encryptionAlgorithm; + private final String encryptionAlgorithm; - private SecureRandom nonceGenerator = new SecureRandom(); + private final SecureRandom nonceGenerator = new SecureRandom(); public static enum AccessMode { READ, WRITE, COPY, REPLACE diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java index 4dc951003a3..ddeb1c1790a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java @@ -36,7 +36,7 @@ public class ExportedBlockKeys implements Writable { private boolean isBlockTokenEnabled; private long keyUpdateInterval; private long tokenLifetime; - private BlockKey currentKey; + private final BlockKey 
   private BlockKey[] allKeys;
 
   public ExportedBlockKeys() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index f8cdb1ec4e5..f5c579a9e2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -203,23 +203,23 @@ public class Balancer {
   private final double threshold;
 
   // all data node lists
-  private Collection overUtilizedDatanodes
+  private final Collection overUtilizedDatanodes
       = new LinkedList();
-  private Collection aboveAvgUtilizedDatanodes
+  private final Collection aboveAvgUtilizedDatanodes
      = new LinkedList();
-  private Collection belowAvgUtilizedDatanodes
+  private final Collection belowAvgUtilizedDatanodes
      = new LinkedList();
-  private Collection underUtilizedDatanodes
+  private final Collection underUtilizedDatanodes
      = new LinkedList();
 
-  private Collection sources
+  private final Collection sources
      = new HashSet();
-  private Collection targets
+  private final Collection targets
      = new HashSet();
 
-  private Map globalBlockList
+  private final Map globalBlockList
      = new HashMap();
-  private MovedBlocks movedBlocks = new MovedBlocks();
+  private final MovedBlocks movedBlocks = new MovedBlocks();
 
   /** Map (datanodeUuid -> BalancerDatanodes) */
   private final Map datanodeMap = new HashMap();
@@ -421,8 +421,8 @@ public class Balancer {
 
   /* A class for keeping track of blocks in the Balancer */
   static private class BalancerBlock {
-    private Block block; // the block
-    private List locations
+    private final Block block; // the block
+    private final List locations
        = new ArrayList(3); // its locations
 
     /* Constructor */
@@ -469,7 +469,7 @@ public class Balancer {
    * An object of this class is stored in a source node.
    */
   static private class NodeTask {
-    private BalancerDatanode datanode; //target node
+    private final BalancerDatanode datanode; //target node
     private long size; //bytes scheduled to move
 
     /* constructor */
@@ -498,7 +498,7 @@ public class Balancer {
     final long maxSize2Move;
     private long scheduledSize = 0L;
     // blocks being moved but not confirmed yet
-    private List pendingBlocks =
+    private final List pendingBlocks =
        new ArrayList(MAX_NUM_CONCURRENT_MOVES);
 
     @Override
@@ -615,13 +615,13 @@ public class Balancer {
       }
     }
 
-    private ArrayList nodeTasks = new ArrayList(2);
+    private final ArrayList nodeTasks = new ArrayList(2);
     private long blocksToReceive = 0L;
     /* source blocks point to balancerBlocks in the global list because
      * we want to keep one copy of a block in balancer and be aware that
      * the locations are changing over time.
      */
-    private List srcBlockList
+    private final List srcBlockList
        = new ArrayList();
 
     /* constructor */
@@ -1092,7 +1092,7 @@ public class Balancer {
       return bytesMoved;
     }
   };
-  private BytesMoved bytesMoved = new BytesMoved();
+  private final BytesMoved bytesMoved = new BytesMoved();
 
   /* Start a thread to dispatch block moves for each source.
    * The thread selects blocks to move & sends request to proxy source to
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
index a7ee8534fc7..3297a250a4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
@@ -75,7 +75,7 @@ abstract class BalancingPolicy {
    * Cluster is balanced if each node is balanced.
   */
  static class Node extends BalancingPolicy {
-    static Node INSTANCE = new Node();
+    static final Node INSTANCE = new Node();
     private Node() {}
 
     @Override
@@ -99,7 +99,7 @@ abstract class BalancingPolicy {
    * Cluster is balanced if each pool in each node is balanced.
   */
  static class Pool extends BalancingPolicy {
-    static Pool INSTANCE = new Pool();
+    static final Pool INSTANCE = new Pool();
     private Pool() {}
 
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 24e83c9c203..7c763dcc6df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -116,8 +116,8 @@ public class BlockManager {
   private volatile long corruptReplicaBlocksCount = 0L;
   private volatile long underReplicatedBlocksCount = 0L;
   private volatile long scheduledReplicationBlocksCount = 0L;
-  private AtomicLong excessBlocksCount = new AtomicLong(0L);
-  private AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
+  private final AtomicLong excessBlocksCount = new AtomicLong(0L);
+  private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
 
   /** Used by metrics */
   public long getPendingReplicationBlocksCount() {
@@ -3403,16 +3403,16 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
 
   private static class ReplicationWork {
 
-    private Block block;
-    private BlockCollection bc;
+    private final Block block;
+    private final BlockCollection bc;
 
-    private DatanodeDescriptor srcNode;
-    private List containingNodes;
-    private List liveReplicaStorages;
-    private int additionalReplRequired;
+    private final DatanodeDescriptor srcNode;
+    private final List containingNodes;
+    private final List liveReplicaStorages;
+    private final int additionalReplRequired;
 
     private DatanodeStorageInfo targets[];
-    private int priority;
+    private final int priority;
 
     public ReplicationWork(Block block,
         BlockCollection bc,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index eafd05cdfec..b658f4f395d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -35,7 +35,7 @@ import com.google.common.collect.Iterables;
  */
 class BlocksMap {
   private static class StorageIterator implements Iterator {
-    private BlockInfo blockInfo;
+    private final BlockInfo blockInfo;
BlockInfo blockInfo; private int nextIdx = 0; StorageIterator(BlockInfo blkInfo) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index cb9f79ab448..c7b148b0ab7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -46,7 +46,7 @@ public class CorruptReplicasMap{ CORRUPTION_REPORTED // client or datanode reported the corruption } - private SortedMap> corruptReplicasMap = + private final SortedMap> corruptReplicasMap = new TreeMap>(); /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index c0d4f0bddbe..2487359d521 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -57,7 +57,7 @@ public class DatanodeDescriptor extends DatanodeInfo { // Stores status of decommissioning. // If node is not decommissioning, do not use this object for anything. - public DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); + public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); /** Block and targets pair */ @InterfaceAudience.Private @@ -192,12 +192,12 @@ public class DatanodeDescriptor extends DatanodeInfo { private long bandwidth; /** A queue of blocks to be replicated by this datanode */ - private BlockQueue replicateBlocks = new BlockQueue(); + private final BlockQueue replicateBlocks = new BlockQueue(); /** A queue of blocks to be recovered by this datanode */ - private BlockQueue recoverBlocks = + private final BlockQueue recoverBlocks = new BlockQueue(); /** A set of blocks to be invalidated by this datanode */ - private LightWeightHashSet invalidateBlocks = new LightWeightHashSet(); + private final LightWeightHashSet invalidateBlocks = new LightWeightHashSet(); /* Variables for maintaining number of blocks scheduled to be written to * this storage. This count is approximate and might be slightly bigger diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java index 6f9049a960c..e6f00246d89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java @@ -31,9 +31,9 @@ import org.apache.hadoop.hdfs.DFSUtil; @InterfaceAudience.Private @InterfaceStability.Evolving class Host2NodesMap { - private HashMap map + private final HashMap map = new HashMap(); - private ReadWriteLock hostmapLock = new ReentrantReadWriteLock(); + private final ReadWriteLock hostmapLock = new ReentrantReadWriteLock(); /** Check if node is already in the map. 
*/ boolean contains(DatanodeDescriptor node) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java index 8afd3ce6ba7..1a9e582c5e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java @@ -34,7 +34,7 @@ import com.google.common.collect.Maps; * */ class PendingDataNodeMessages { - Map> queueByBlockId = + final Map> queueByBlockId = Maps.newHashMap(); private int count = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java index 6b07b789341..2b507e74acf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java @@ -46,8 +46,8 @@ import org.apache.hadoop.util.Daemon; class PendingReplicationBlocks { private static final Log LOG = BlockManager.LOG; - private Map pendingReplications; - private ArrayList timedOutItems; + private final Map pendingReplications; + private final ArrayList timedOutItems; Daemon timerThread = null; private volatile boolean fsRunning = true; @@ -56,7 +56,7 @@ class PendingReplicationBlocks { // a request is timed out. // private long timeout = 5 * 60 * 1000; - private long defaultRecheckInterval = 5 * 60 * 1000; + private final static long DEFAULT_RECHECK_INTERVAL = 5 * 60 * 1000; PendingReplicationBlocks(long timeoutPeriod) { if ( timeoutPeriod > 0 ) { @@ -215,7 +215,7 @@ class PendingReplicationBlocks { @Override public void run() { while (fsRunning) { - long period = Math.min(defaultRecheckInterval, timeout); + long period = Math.min(DEFAULT_RECHECK_INTERVAL, timeout); try { pendingReplicationCheck(); Thread.sleep(period); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java index 83a29d2f91b..0920afcd997 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java @@ -82,7 +82,7 @@ class UnderReplicatedBlocks implements Iterable { /** The queue for corrupt blocks: {@value} */ static final int QUEUE_WITH_CORRUPT_BLOCKS = 4; /** the queues themselves */ - private List> priorityQueues + private final List> priorityQueues = new ArrayList>(); /** Stores the replication index for each priority */ @@ -390,7 +390,7 @@ class UnderReplicatedBlocks implements Iterable { class BlockIterator implements Iterator { private int level; private boolean isIteratorForLevel = false; - private List> iterators = new ArrayList>(); + private final List> iterators = new ArrayList>(); /** * Construct an iterator over all queues. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 6aab9a535d1..7a83bbf21e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -196,10 +196,10 @@ public final class HdfsServerConstants { } // Timeouts for communicating with DataNode for streaming writes/reads - public static int READ_TIMEOUT = 60 * 1000; - public static int READ_TIMEOUT_EXTENSION = 5 * 1000; - public static int WRITE_TIMEOUT = 8 * 60 * 1000; - public static int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline + public static final int READ_TIMEOUT = 60 * 1000; + public static final int READ_TIMEOUT_EXTENSION = 5 * 1000; + public static final int WRITE_TIMEOUT = 8 * 60 * 1000; + public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline /** * Defines the NameNode role. @@ -233,7 +233,7 @@ public final class HdfsServerConstants { /** Temporary replica: created for replication and relocation only. */ TEMPORARY(4); - private int value; + private final int value; private ReplicaState(int v) { value = v; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 3656aa2b3b3..80bb4120861 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -118,8 +118,8 @@ public abstract class Storage extends StorageInfo { protected List storageDirs = new ArrayList(); private class DirIterator implements Iterator { - StorageDirType dirType; - boolean includeShared; + final StorageDirType dirType; + final boolean includeShared; int prevIndex; // for remove() int nextIndex; // for next() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java index 2bbd19f71ad..8c6ce6a65e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java @@ -27,9 +27,9 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private public class UpgradeStatusReport { - protected int version; - protected short upgradeStatus; - protected boolean finalized; + protected final int version; + protected final short upgradeStatus; + protected final boolean finalized; public UpgradeStatusReport(int version, short status, boolean isFinalized) { this.version = version; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 2977bfcac6f..9a2a6205c97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ 
@@ -78,7 +78,7 @@ class BPOfferService {
    * The list of all actors for namenodes in this nameservice, regardless
    * of their active or standby states.
    */
-  private List bpServices =
+  private final List bpServices =
       new CopyOnWriteArrayList();
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 4c44f4ed9f2..207d613a3f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -74,7 +74,7 @@ class BPServiceActor implements Runnable {
   final InetSocketAddress nnAddr;
   HAServiceState state;
-  BPOfferService bpos;
+  final BPOfferService bpos;
   // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
   // by testing threads (through BPServiceActor#triggerXXX), while also
@@ -893,7 +893,7 @@ class BPServiceActor implements Runnable {
   }
   private static class PerStoragePendingIncrementalBR {
-    private Map pendingIncrementalBR =
+    private final Map pendingIncrementalBR =
         Maps.newHashMap();
     /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
index 802942171d8..8417ffc6b48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
@@ -54,7 +54,7 @@ public class BlockMetadataHeader {
    * Version is two bytes. Following it is the DataChecksum
    * that occupies 5 bytes.
    */
-  private short version;
+  private final short version;
   private DataChecksum checksum = null;
   @VisibleForTesting
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 13a83bce5fd..3991018267e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -361,7 +361,7 @@ class BlockPoolSliceScanner {
    * This simple text and easily extendable and easily parseable with a
    * regex.
    */
-  private static Pattern entryPattern =
+  private static final Pattern entryPattern =
       Pattern.compile("\\G\\s*([^=\\p{Space}]+)=\"(.*?)\"\\s*");
   static String toString(long verificationTime, long genStamp, long blockId,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 03dcb6d9d32..7e0a1f64cdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -86,7 +86,7 @@ class BlockReceiver implements Closeable {
   private int bytesPerChecksum;
   private int checksumSize;
-  private PacketReceiver packetReceiver =
+  private final PacketReceiver packetReceiver =
       new PacketReceiver(false);
   protected final String inAddr;
@@ -902,7 +902,7 @@ class BlockReceiver implements Closeable {
     NON_PIPELINE, LAST_IN_PIPELINE, HAS_DOWNSTREAM_IN_PIPELINE
   }
-  private static Status[] MIRROR_ERROR_STATUS = {Status.SUCCESS, Status.ERROR};
+  private static final Status[] MIRROR_ERROR_STATUS = {Status.SUCCESS, Status.ERROR};
   /**
    * Processes responses from downstream datanodes in the pipeline
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 524fda2d2f5..7d1731e11c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -186,7 +186,7 @@ public class DataNode extends Configured
   private String clusterId = null;
   public final static String EMPTY_DEL_HINT = "";
-  AtomicInteger xmitsInProgress = new AtomicInteger();
+  final AtomicInteger xmitsInProgress = new AtomicInteger();
   Daemon dataXceiverServer = null;
   Daemon localDataXceiverServer = null;
   ShortCircuitRegistry shortCircuitRegistry = null;
@@ -224,11 +224,11 @@ public class DataNode extends Configured
   private SecureResources secureResources = null;
   private List dataDirs;
   private Configuration conf;
-  private String confVersion;
+  private final String confVersion;
   private final long maxNumberOfBlocksToLog;
   private final List usersWithLocalPathAccess;
-  private boolean connectToDnViaHostname;
+  private final boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
   private final boolean getHdfsBlockLocationsEnabled;
   private ObjectName dataNodeInfoBeanName;
@@ -1837,7 +1837,7 @@ public class DataNode extends Configured
   // Small wrapper around the DiskChecker class that provides means to mock
   // DiskChecker static methods and unittest DataNode#getDataDirsFromURIs.
   static class DataNodeDiskChecker {
-    private FsPermission expectedPermission;
+    private final FsPermission expectedPermission;
     public DataNodeDiskChecker(FsPermission expectedPermission) {
       this.expectedPermission = expectedPermission;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index f48bd3d6905..c6ea6a2edd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -78,7 +78,7 @@ public class DataStorage extends Storage {
   private boolean initialized = false;
   // Maps block pool IDs to block pool storage
-  private Map bpStorageMap
+  private final Map bpStorageMap
       = Collections.synchronizedMap(new HashMap());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 2361ca77276..6f7310f157a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -92,7 +92,7 @@ class DataXceiverServer implements Runnable {
     }
   }
-  BlockBalanceThrottler balanceThrottler;
+  final BlockBalanceThrottler balanceThrottler;
   /**
    * We need an estimate for block size to check if the disk partition has
    * enough space.
   * A better solution is to include in the header the estimated block size,
   * i.e. either the actual block size or the default block size.
   */
-  long estimateBlockSize;
+  final long estimateBlockSize;
   DataXceiverServer(PeerServer peerServer, Configuration conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 9ebd5740efd..164fd45993f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -64,8 +64,8 @@ public class DirectoryScanner implements Runnable {
   private volatile boolean shouldRun = false;
   private boolean retainDiffs = false;
-  ScanInfoPerBlockPool diffs = new ScanInfoPerBlockPool();
-  Map stats = new HashMap();
+  final ScanInfoPerBlockPool diffs = new ScanInfoPerBlockPool();
+  final Map stats = new HashMap();
   /**
    * Allow retaining diffs for unit test and analysis
@@ -77,7 +77,7 @@ public class DirectoryScanner implements Runnable {
   /** Stats tracked for reporting and testing, per blockpool */
   static class Stats {
-    String bpid;
+    final String bpid;
     long totalBlocks = 0;
     long missingMetaFile = 0;
     long missingBlockFile = 0;
@@ -570,7 +570,7 @@ public class DirectoryScanner implements Runnable {
   private static class ReportCompiler
       implements Callable {
-    private FsVolumeSpi volume;
+    private final FsVolumeSpi volume;
     public ReportCompiler(FsVolumeSpi volume) {
       this.volume = volume;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
index 77e2f3e9c93..b1d04aa83cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
@@ -82,11 +82,11 @@ public class AvailableSpaceVolumeChoosingPolicy
     return null;
   }
-  private VolumeChoosingPolicy roundRobinPolicyBalanced =
+  private final VolumeChoosingPolicy roundRobinPolicyBalanced =
       new RoundRobinVolumeChoosingPolicy();
-  private VolumeChoosingPolicy roundRobinPolicyHighAvailable =
+  private final VolumeChoosingPolicy roundRobinPolicyHighAvailable =
      new RoundRobinVolumeChoosingPolicy();
-  private VolumeChoosingPolicy roundRobinPolicyLowAvailable =
+  private final VolumeChoosingPolicy roundRobinPolicyLowAvailable =
      new RoundRobinVolumeChoosingPolicy();
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 6eeb23e529d..24c24fe75e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -62,7 +62,7 @@ class BlockPoolSlice {
   private final LDir finalizedDir; // directory store Finalized replica
   private final File rbwDir; // directory store RBW replica
   private final File tmpDir; // directory store Temporary replica
-  private static String DU_CACHE_FILE = "dfsUsed";
"dfsUsed"; + private static final String DU_CACHE_FILE = "dfsUsed"; private volatile boolean dfsUsedSaved = false; private static final int SHUTDOWN_HOOK_PRIORITY = 30; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index 277c2e7cb52..975b836cbdd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -145,7 +145,7 @@ public class FsDatasetCache { private class UsedBytesCount { private final AtomicLong usedBytes = new AtomicLong(0); - private PageRounder rounder = new PageRounder(); + private final PageRounder rounder = new PageRounder(); /** * Try to reserve more bytes. @@ -196,11 +196,11 @@ public class FsDatasetCache { /** * Number of cache commands that could not be completed successfully */ - AtomicLong numBlocksFailedToCache = new AtomicLong(0); + final AtomicLong numBlocksFailedToCache = new AtomicLong(0); /** * Number of uncache commands that could not be completed successfully */ - AtomicLong numBlocksFailedToUncache = new AtomicLong(0); + final AtomicLong numBlocksFailedToUncache = new AtomicLong(0); public FsDatasetCache(FsDatasetImpl dataset) { this.dataset = dataset; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java index fe478cfaee9..b32b37617b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java @@ -33,7 +33,7 @@ class ReplicaMap { private final Object mutex; // Map of block pool Id to another map of block Id to ReplicaInfo. 
- private Map> map = + private final Map> map = new HashMap>(); ReplicaMap(Object mutex) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java index 4a36e66c577..ec7376b17d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java @@ -46,7 +46,7 @@ class RollingLogsImpl implements RollingLogs { private final File prev; private PrintWriter out; //require synchronized access - private Appender appender = new Appender() { + private final Appender appender = new Appender() { @Override public Appendable append(CharSequence csq) { synchronized(RollingLogsImpl.this) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index ffdb8e7cf86..9601bcf0c67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -78,18 +78,18 @@ public class DataNodeMetrics { @Metric MutableRate blockReports; @Metric MutableRate cacheReports; @Metric MutableRate packetAckRoundTripTimeNanos; - MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles; + final MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles; @Metric MutableRate flushNanos; - MutableQuantiles[] flushNanosQuantiles; + final MutableQuantiles[] flushNanosQuantiles; @Metric MutableRate fsyncNanos; - MutableQuantiles[] fsyncNanosQuantiles; + final MutableQuantiles[] fsyncNanosQuantiles; @Metric MutableRate sendDataPacketBlockedOnNetworkNanos; - MutableQuantiles[] sendDataPacketBlockedOnNetworkNanosQuantiles; + final MutableQuantiles[] sendDataPacketBlockedOnNetworkNanosQuantiles; @Metric MutableRate sendDataPacketTransferNanos; - MutableQuantiles[] sendDataPacketTransferNanosQuantiles; + final MutableQuantiles[] sendDataPacketTransferNanosQuantiles; final MetricsRegistry registry = new MetricsRegistry("datanode"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index 000a644f6f9..4f1973d93d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -42,7 +42,7 @@ import com.google.common.collect.Lists; @InterfaceAudience.Private public class BackupImage extends FSImage { /** Backup input stream for loading edits into memory */ - private EditLogBackupInputStream backupInputStream = + private final EditLogBackupInputStream backupInputStream = new EditLogBackupInputStream("Data from remote NameNode"); /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index 55c7d61bdc8..87c546e3c5a 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -85,7 +85,7 @@ public final class CachePool { public final static class DirectiveList extends IntrusiveCollection { - private CachePool cachePool; + private final CachePool cachePool; private DirectiveList(CachePool cachePool) { this.cachePool = cachePool; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index c66574cdcaa..62aefb9c1dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -59,7 +59,7 @@ class Checkpointer extends Daemon { public static final Log LOG = LogFactory.getLog(Checkpointer.class.getName()); - private BackupNode backupNode; + private final BackupNode backupNode; volatile boolean shouldRun; private String infoBindAddress; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java index b2da66ef07a..0f6396658f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java @@ -33,8 +33,8 @@ import com.google.common.base.Preconditions; * int, int, byte[]) */ class EditLogBackupInputStream extends EditLogInputStream { - String address; // sender address - private ByteBufferInputStream inner; + final String address; // sender address + private final ByteBufferInputStream inner; private DataInputStream in; private FSEditLogOp.Reader reader = null; private FSEditLogLoader.PositionTrackingInputStream tracker = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java index aeff0d1d221..14d5b5464ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java @@ -43,8 +43,8 @@ import org.apache.hadoop.security.UserGroupInformation; * int, int, byte[]) */ class EditLogBackupOutputStream extends EditLogOutputStream { - private static Log LOG = LogFactory.getLog(EditLogFileOutputStream.class); - static int DEFAULT_BUFFER_SIZE = 256; + private static final Log LOG = LogFactory.getLog(EditLogFileOutputStream.class); + static final int DEFAULT_BUFFER_SIZE = 256; private final JournalProtocol backupNode; // RPC proxy to backup node private final NamenodeRegistration bnRegistration; // backup node registration diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java index fee6b8b6ef0..e9f47b9e244 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java @@ -42,14 +42,14 @@ import com.google.common.annotations.VisibleForTesting; */ @InterfaceAudience.Private public class EditLogFileOutputStream extends EditLogOutputStream { - private static Log LOG = LogFactory.getLog(EditLogFileOutputStream.class); + private static final Log LOG = LogFactory.getLog(EditLogFileOutputStream.class); public static final int MIN_PREALLOCATION_LENGTH = 1024 * 1024; private File file; private FileOutputStream fp; // file stream for storing edit logs private FileChannel fc; // channel of the file stream for sync private EditsDoubleBuffer doubleBuf; - static ByteBuffer fill = ByteBuffer.allocateDirect(MIN_PREALLOCATION_LENGTH); + static final ByteBuffer fill = ByteBuffer.allocateDirect(MIN_PREALLOCATION_LENGTH); private boolean shouldSyncWritesAndSkipFsync = false; private static boolean shouldSkipFsyncForTests = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index d6c8a84741b..f1da3fb9b01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -132,7 +132,7 @@ public class EditsDoubleBuffer { private static class TxnBuffer extends DataOutputBuffer { long firstTxId; int numTxns; - private Writer writer; + private final Writer writer; public TxnBuffer(int initBufferSize) { super(initBufferSize); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 23660f65cdb..055b7064bcd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -125,8 +125,8 @@ public class FSDirectory implements Closeable { private long yieldCount = 0; // keep track of lock yield count. 
   // lock to protect the directory and BlockMap
-  private ReentrantReadWriteLock dirLock;
-  private Condition cond;
+  private final ReentrantReadWriteLock dirLock;
+  private final Condition cond;
   // utility methods to acquire and release read lock and write lock
   void readLock() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index d8213d44385..dfc1a80956f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -172,7 +172,7 @@ public class FSEditLog implements LogsPurgeable {
   private final List editsDirs;
-  private ThreadLocal cache =
+  private final ThreadLocal cache =
       new ThreadLocal() {
     @Override
     protected OpInstanceCache initialValue() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index dabe6ef1b37..7a824230072 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -136,7 +136,7 @@ public abstract class FSEditLogOp {
   int rpcCallId = RpcConstants.INVALID_CALL_ID;
   final public static class OpInstanceCache {
-    private EnumMap inst =
+    private final EnumMap inst =
         new EnumMap(FSEditLogOpCodes.class);
     public OpInstanceCache() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
index 067dd75f4b1..72c304017d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
@@ -94,7 +94,7 @@ public enum FSEditLogOpCodes {
     return opCode;
   }
-  private static FSEditLogOpCodes[] VALUES;
+  private static final FSEditLogOpCodes[] VALUES;
   static {
     byte max = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index e0559870795..b0e852d00b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -940,7 +940,7 @@ public class FSImage implements Closeable {
    */
   private class FSImageSaver implements Runnable {
     private final SaveNamespaceContext context;
-    private StorageDirectory sd;
+    private final StorageDirectory sd;
     private final NameNodeFile nnf;
     public FSImageSaver(SaveNamespaceContext context, StorageDirectory sd,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 5431d3295d1..e5a6d518de4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -908,7 +908,7 @@ public class FSImageFormat {
   }
   @VisibleForTesting
-  public static TreeMap renameReservedMap =
+  public static final TreeMap renameReservedMap =
       new TreeMap();
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
index d0554f62048..1971a0d80a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
@@ -68,10 +68,10 @@ class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector {
   private StorageDirectory latestEditsSD = null;
   /** Set to determine if all of storageDirectories share the same checkpoint */
-  Set checkpointTimes = new HashSet();
+  final Set checkpointTimes = new HashSet();
-  private List imageDirs = new ArrayList();
-  private List editsDirs = new ArrayList();
+  private final List imageDirs = new ArrayList();
+  private final List editsDirs = new ArrayList();
   @Override
   void inspectDirectory(StorageDirectory sd) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
index 90a672d99e3..441f89da2d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
@@ -52,7 +52,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
   private boolean needToSave = false;
   private boolean isUpgradeFinalized = true;
-  List foundImages = new ArrayList();
+  final List foundImages = new ArrayList();
   private long maxSeenTxId = 0;
   private final List namePatterns = Lists.newArrayList();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3e894861ed7..c508e2f98ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -474,7 +474,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private final long accessTimePrecision;
   /** Lock to protect FSNamesystem. */
-  private FSNamesystemLock fsLock;
+  private final FSNamesystemLock fsLock;
   /**
    * Used when this NN is in standby state to read from the shared edit log.
@@ -4641,15 +4641,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   public class SafeModeInfo {
     // configuration fields
     /** Safe mode threshold condition %.*/
-    private double threshold;
+    private final double threshold;
     /** Safe mode minimum number of datanodes alive */
-    private int datanodeThreshold;
+    private final int datanodeThreshold;
     /** Safe mode extension after the threshold. */
    private int extension;
     /** Min replication required by safe mode. */
-    private int safeReplication;
+    private final int safeReplication;
     /** threshold for populating needed replication queues */
-    private double replQueueThreshold;
+    private final double replQueueThreshold;
     // internal fields
     /** Time when threshold was reached.
      *   -1 safe mode is off
@@ -6085,8 +6085,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
   static class CorruptFileBlockInfo {
-    String path;
-    Block block;
+    final String path;
+    final Block block;
     public CorruptFileBlockInfo(String p, Block b) {
       path = p;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index a1b9477a694..f0312849b7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -50,7 +50,7 @@ class FSNamesystemLock implements ReadWriteLock {
    * See HDFS-5064 for more context.
    */
   @VisibleForTesting
-  protected ReentrantLock longReadLock = new ReentrantLock(true);
+  protected final ReentrantLock longReadLock = new ReentrantLock(true);
   FSNamesystemLock(boolean fair) {
     this.coarseLock = new ReentrantReadWriteLock(fair);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HostFileManager.java
index 8bd46b22509..46b92044cda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HostFileManager.java
@@ -168,7 +168,7 @@ public class HostFileManager {
    * The different indexing strategies reflect the fact that we may or may
    * not have a port or IP address for each entry.
    */
-  TreeMap index = new TreeMap();
+  final TreeMap index = new TreeMap();
   public boolean isEmpty() {
     return index.isEmpty();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index b08324f1922..6e31b2fad3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -750,7 +750,7 @@ public abstract class INode implements INodeAttributes, Diff.Element {
     /**
     * The list of blocks that need to be removed from blocksMap
     */
-    private List toDeleteList;
+    private final List toDeleteList;
     public BlocksMapUpdateInfo() {
       toDeleteList = new ChunkedArrayList();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index b1f127de0e1..b844dfaa37b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -81,7 +81,7 @@ public class ImageServlet extends HttpServlet {
   private static final String LATEST_FSIMAGE_VALUE = "latest";
   private static final String IMAGE_FILE_TYPE = "imageFile";
-  private static Set currentlyDownloadingCheckpoints =
+  private static final Set currentlyDownloadingCheckpoints =
     Collections.synchronizedSet(new HashSet());
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index a7203f95746..0a7572e3ba9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -178,7 +178,7 @@ public class JournalSet implements JournalManager {
   // COW implementation is necessary since some users (eg the web ui) call
   // getAllJournalStreams() and then iterate. Since this is rarely
   // mutated, there is no performance concern.
-  private List journals =
+  private final List journals =
       new CopyOnWriteArrayList();
   final int minimumRedundantJournals;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 43e59ff757b..d324f76fbc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -77,15 +77,15 @@ public class LeaseManager {
   // Used for handling lock-leases
   // Mapping: leaseHolder -> Lease
   //
-  private SortedMap leases = new TreeMap();
+  private final SortedMap leases = new TreeMap();
   // Set of: Lease
-  private SortedSet sortedLeases = new TreeSet();
+  private final SortedSet sortedLeases = new TreeSet();
   //
   // Map path names to leases. It is protected by the sortedLeases lock.
   // The map stores pathnames in lexicographical order.
   //
-  private SortedMap sortedLeasesByPath = new TreeMap();
+  private final SortedMap sortedLeasesByPath = new TreeMap();
   private Daemon lmthread;
   private volatile boolean shouldRunMonitor;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 0a5594b6b8d..a5ea1d8d498 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -117,7 +117,7 @@ public class NNStorage extends Storage implements Closeable,
    * flag that controls if we try to restore failed storages
    */
   private boolean restoreFailedStorage = false;
-  private Object restorationLock = new Object();
+  private final Object restorationLock = new Object();
   private boolean disablePreUpgradableLayoutCheck = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 856491679cb..35dd5d57800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -248,11 +248,11 @@ public class NameNode implements NameNodeStatusMXBean {
   protected FSNamesystem namesystem;
   protected final Configuration conf;
-  protected NamenodeRole role;
+  protected final NamenodeRole role;
   private volatile HAState state;
   private final boolean haEnabled;
   private final HAContext haContext;
-  protected boolean allowStaleStandbyReads;
+  protected final boolean allowStaleStandbyReads;
   /** httpServer */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 475e2e1d1b1..eb21197b2fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -52,7 +52,7 @@ public class NameNodeHttpServer {
   private InetSocketAddress httpAddress;
   private InetSocketAddress httpsAddress;
-  private InetSocketAddress bindAddress;
+  private final InetSocketAddress bindAddress;
   public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
   public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
index 05451c94885..7b1dbc6f2a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
@@ -50,7 +50,7 @@ public class NameNodeResourceChecker {
   private static final Log LOG = LogFactory.getLog(NameNodeResourceChecker.class.getName());
   // Space (in bytes) reserved per volume.
-  private long duReserved;
+  private final long duReserved;
   private final Configuration conf;
   private Map volumes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 49874b6a73b..fe227c6e075 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -144,13 +144,13 @@ public class NamenodeFsck {
   // We return back N files that are corrupt; the list of files returned is
   // ordered by block id; to allow continuation support, pass in the last block
   // # from previous call
-  private String[] currentCookie = new String[] { null };
+  private final String[] currentCookie = new String[] { null };
   private final Configuration conf;
   private final PrintWriter out;
   private List snapshottableDirs = null;
-  private BlockPlacementPolicy bpPolicy;
+  private final BlockPlacementPolicy bpPolicy;
   /**
    * Filesystem checker.
@@ -716,7 +716,7 @@ public class NamenodeFsck {
    */
   @VisibleForTesting
   static class Result {
-    List missingIds = new ArrayList();
+    final List missingIds = new ArrayList();
     long missingSize = 0L;
     long corruptFiles = 0L;
     long corruptBlocks = 0L;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
index a7c4c75f005..823385a9806 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
@@ -41,7 +41,7 @@ public class SaveNamespaceContext {
     Collections.synchronizedList(new ArrayList());
   private final Canceler canceller;
-  private CountDownLatch completionLatch = new CountDownLatch(1);
+  private final CountDownLatch completionLatch = new CountDownLatch(1);
   SaveNamespaceContext(
       FSNamesystem sourceNamesystem,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index a35d362a0d3..573939f1fb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -794,8 +794,8 @@ public class SecondaryNameNode implements Runnable {
   private int mergeErrorCount;
   private static class CheckpointLogPurger implements LogsPurgeable {
-    private NNStorage storage;
-    private StoragePurger purger
+    private final NNStorage storage;
+    private final StoragePurger purger
         = new NNStorageRetentionManager.DeletionStoragePurger();
     public CheckpointLogPurger(NNStorage storage) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
index 6897e353fff..6d7af82ad09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
@@ -27,8 +27,8 @@ class SerialNumberManager {
   /** This is the only instance of {@link SerialNumberManager}.*/
   static final SerialNumberManager INSTANCE = new SerialNumberManager();
-  private SerialNumberMap usermap = new SerialNumberMap();
-  private SerialNumberMap groupmap = new SerialNumberMap();
+  private final SerialNumberMap usermap = new SerialNumberMap();
+  private final SerialNumberMap groupmap = new SerialNumberMap();
   private SerialNumberManager() {}
@@ -43,9 +43,9 @@ class SerialNumberManager {
   }
   private static class SerialNumberMap {
-    private AtomicInteger max = new AtomicInteger(1);
-    private ConcurrentMap t2i = new ConcurrentHashMap();
-    private ConcurrentMap i2t = new ConcurrentHashMap();
+    private final AtomicInteger max = new AtomicInteger(1);
+    private final ConcurrentMap t2i = new ConcurrentHashMap();
+    private final ConcurrentMap i2t = new ConcurrentHashMap();
     int get(T t) {
       if (t == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 07870199d99..088e06a43e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -79,8 +79,8 @@ public class TransferFsImage {
   @VisibleForTesting
   static int timeout = 0;
-  private static URLConnectionFactory connectionFactory;
-  private static boolean isSpnegoEnabled;
+  private static final URLConnectionFactory connectionFactory;
+  private static final boolean isSpnegoEnabled;
   static {
     Configuration conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index ebe6ef87a5f..608cd00080e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -141,7 +141,7 @@ public class ConfiguredFailoverProxyProvider implements
    * an NN. Note that {@link AddressRpcProxyPair#namenode} may be null.
    */
  private static class AddressRpcProxyPair {
-    public InetSocketAddress address;
+    public final InetSocketAddress address;
     public T namenode;
     public AddressRpcProxyPair(InetSocketAddress address) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 4e8c46d8b26..a16af37bf1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -91,13 +91,13 @@ public class EditLogTailer {
    * from finalized log segments, the Standby will only be as up-to-date as how
    * often the logs are rolled.
    */
-  private long logRollPeriodMs;
+  private final long logRollPeriodMs;
   /**
    * How often the Standby should check if there are new finalized segment(s)
    * available to be read from.
    */
-  private long sleepTimeMs;
+  private final long sleepTimeMs;
   public EditLogTailer(FSNamesystem namesystem, Configuration conf) {
     this.tailerThread = new EditLogTailerThread();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index a80c8774720..d0431cd8623 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -71,7 +71,7 @@ public class StandbyCheckpointer {
   private URL activeNNAddress;
   private URL myNNAddress;
-  private Object cancelLock = new Object();
+  private final Object cancelLock = new Object();
   private Canceler canceler;
   // Keep track of how many checkpoints were canceled.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index a47eb73d23a..1397131f4ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -76,13 +76,13 @@ public class NameNodeMetrics {
   @Metric("Journal transactions") MutableRate transactions;
   @Metric("Journal syncs") MutableRate syncs;
-  MutableQuantiles[] syncsQuantiles;
+  final MutableQuantiles[] syncsQuantiles;
   @Metric("Journal transactions batched in sync")
   MutableCounterLong transactionsBatchedInSync;
   @Metric("Block report") MutableRate blockReport;
-  MutableQuantiles[] blockReportQuantiles;
+  final MutableQuantiles[] blockReportQuantiles;
   @Metric("Cache report") MutableRate cacheReport;
-  MutableQuantiles[] cacheReportQuantiles;
+  final MutableQuantiles[] cacheReportQuantiles;
   @Metric("Duration in SafeMode at startup in msec") MutableGaugeInt safeModeTime;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
index 3bdce3a00fd..3f1d9030297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 final class PhaseTracking extends AbstractTracking {
   String file;
   long size = Long.MIN_VALUE;
-  ConcurrentMap steps =
+  final ConcurrentMap steps =
       new ConcurrentHashMap();
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
index 33d1e220424..1b529b90c50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class StartupProgress {
   // package-private for access by StartupProgressView
-  Map phases =
+  final Map phases =
       new ConcurrentHashMap();
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
index 1a6682ad4a2..7f178005a36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
@@ -36,7 +36,7 @@ package org.apache.hadoop.hdfs.server.protocol;
 public class BalancerBandwidthCommand extends DatanodeCommand {
   private final static long BBC_DEFAULTBANDWIDTH = 0L;
-  private long bandwidth;
+  private final long bandwidth;
   /**
    * Balancer Bandwidth Command constructor. Sets bandwidth to 0.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
index 43d1e3188be..b7199ba3803 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
@@ -41,7 +41,7 @@ import com.google.common.base.Joiner;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class BlockRecoveryCommand extends DatanodeCommand {
-  Collection recoveringBlocks;
+  final Collection recoveringBlocks;
   /**
    * This is a block with locations from which it should be recovered
@@ -53,7 +53,7 @@ public class BlockRecoveryCommand extends DatanodeCommand {
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
   public static class RecoveringBlock extends LocatedBlock {
-    private long newGenerationStamp;
+    private final long newGenerationStamp;
     /**
      * Create RecoveringBlock.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
index e4f34ad5fe3..bc446ac7541 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
@@ -78,7 +78,7 @@ public class BlocksWithLocations {
     }
   }
-  private BlockWithLocations[] blocks;
+  private final BlockWithLocations[] blocks;
   /** Constructor with one parameter */
   public BlocksWithLocations(BlockWithLocations[] blocks) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
index 36947b84bf6..3b7105a8bfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class CheckpointCommand extends NamenodeCommand {
-  private CheckpointSignature cSig;
-  private boolean needToReturnImage;
+  private final CheckpointSignature cSig;
+  private final boolean needToReturnImage;
   public CheckpointCommand() {
     this(null, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index 8ab18f4b3cb..000ca62b982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 public class DatanodeRegistration extends DatanodeID
     implements NodeRegistration {
-  private
StorageInfo storageInfo; + private final StorageInfo storageInfo; private ExportedBlockKeys exportedKeys; - private String softwareVersion; + private final String softwareVersion; public DatanodeRegistration(DatanodeID dn, StorageInfo info, ExportedBlockKeys keys, String softwareVersion) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java index 4496bca25fe..d00179ecad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java @@ -28,12 +28,12 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus; */ public class HeartbeatResponse { /** Commands returned from the namenode to the datanode */ - private DatanodeCommand[] commands; + private final DatanodeCommand[] commands; /** Information about the current HA-related state of the NN */ - private NNHAStatusHeartbeat haStatus; + private final NNHAStatusHeartbeat haStatus; - private RollingUpgradeStatus rollingUpdateStatus; + private final RollingUpgradeStatus rollingUpdateStatus; public HeartbeatResponse(DatanodeCommand[] cmds, NNHAStatusHeartbeat haStatus, RollingUpgradeStatus rollingUpdateStatus) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java index fb1d2189da2..b1b37e4ca55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; @InterfaceAudience.Private @InterfaceStability.Evolving public class KeyUpdateCommand extends DatanodeCommand { - private ExportedBlockKeys keys; + private final ExportedBlockKeys keys; KeyUpdateCommand() { this(new ExportedBlockKeys()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java index 66ccb3bd79f..faaf8f4e4ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; @InterfaceStability.Evolving public class NNHAStatusHeartbeat { - private HAServiceState state; + private final HAServiceState state; private long txid = HdfsConstants.INVALID_TXID; public NNHAStatusHeartbeat(HAServiceState state, long txid) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java index 7a0f2dea021..e65ad24831b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java @@ -32,9 +32,9 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; @InterfaceStability.Evolving public class NamenodeRegistration extends StorageInfo implements NodeRegistration { - String rpcAddress; // RPC address of the node - String httpAddress; // HTTP address of the node - NamenodeRole role; // node role + final String rpcAddress; // RPC address of the node + final String httpAddress; // HTTP address of the node + final NamenodeRole role; // node role public NamenodeRegistration(String address, String httpAddress, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index 8473132fdb5..07337434e23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -37,7 +37,7 @@ import org.apache.hadoop.util.VersionInfo; @InterfaceAudience.Private @InterfaceStability.Evolving public class NamespaceInfo extends StorageInfo { - String buildVersion; + final String buildVersion; String blockPoolID = ""; // id of the block pool String softwareVersion; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java index ece1b3d8ac3..ee1fa1b314c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; @InterfaceAudience.Private @InterfaceStability.Evolving public class ReplicaRecoveryInfo extends Block { - private ReplicaState originalState; + private final ReplicaState originalState; public ReplicaRecoveryInfo(long blockId, long diskLen, long gs, ReplicaState rState) { set(blockId, diskLen, gs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java index 193839cd37f..b81afa93447 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java @@ -31,7 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class ServerCommand { - private int action; + private final int action; /** * Create a command for the specified action. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReceivedDeletedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReceivedDeletedBlocks.java index 29a7a59db02..db9505a952f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReceivedDeletedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReceivedDeletedBlocks.java @@ -23,7 +23,7 @@ package org.apache.hadoop.hdfs.server.protocol; * storage. */ public class StorageReceivedDeletedBlocks { - DatanodeStorage storage; + final DatanodeStorage storage; private final ReceivedDeletedBlockInfo[] blocks; @Deprecated diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index b674d09d509..01e07eef0b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -1032,7 +1032,7 @@ public class CacheAdmin extends Configured implements Tool { } } - private static Command[] COMMANDS = { + private static final Command[] COMMANDS = { new AddCacheDirectiveInfoCommand(), new ModifyCacheDirectiveInfoCommand(), new ListCacheDirectiveInfoCommand(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java index e18c9a86ad0..283ca5e163f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java @@ -53,7 +53,7 @@ public class DFSZKFailoverController extends ZKFailoverController { private static final Log LOG = LogFactory.getLog(DFSZKFailoverController.class); - private AccessControlList adminAcl; + private final AccessControlList adminAcl; /* the same as superclass's localTarget, but with the more specfic NN type */ private final NNHAServiceTarget localNNTarget; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java index 92a3864a675..29ca0db7595 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java @@ -76,7 +76,7 @@ public class GetConf extends Configured implements Tool { NNRPCADDRESSES("-nnRpcAddresses", "gets the namenode rpc addresses"), CONFKEY("-confKey [key]", "gets a specific key from the configuration"); - private static Map map; + private static final Map map; static { map = new HashMap(); map.put(NAMENODE.getName().toLowerCase(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java index cfa409309e1..46cc3c05971 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java @@ -191,8 +191,8 @@ public class TableListing { private final Column columns[]; private int numRows; - private boolean showHeader; - private int wrapWidth; + private final boolean showHeader; + private final int wrapWidth; TableListing(Column columns[], boolean showHeader, int wrapWidth) { this.columns = columns; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java index 476c23ddb7d..d2634b38feb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java @@ -35,8 +35,8 @@ import org.apache.hadoop.io.IOUtils; @InterfaceAudience.Private @InterfaceStability.Unstable class OfflineEditsBinaryLoader implements OfflineEditsLoader { - private OfflineEditsVisitor visitor; - private EditLogInputStream inputStream; + private final OfflineEditsVisitor visitor; + private final EditLogInputStream inputStream; private final boolean fixTxIds; private final boolean recoveryMode; private long nextTxId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java index f877d39369f..96f56a3cdb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java @@ -24,7 +24,7 @@ import java.io.OutputStream; * A TeeOutputStream writes its output to multiple output streams. 
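Note on the hunks above: a few of them (for example connectionFactory and isSpnegoEnabled in TransferFsImage) turn fields into blank finals that are assigned once inside a static initializer rather than at the declaration. Java's definite-assignment analysis still enforces exactly one write. A minimal sketch of that idiom follows; the class and property names are hypothetical and not part of this patch:

// BlankFinalSketch.java -- illustrative only; not part of the patch.
public class BlankFinalSketch {
  // A blank static final: declared without an initializer...
  private static final boolean isSpnegoEnabled;

  // ...and assigned exactly once in the static initializer, standing in
  // for the Configuration lookup that TransferFsImage performs.
  static {
    isSpnegoEnabled = Boolean.getBoolean("sketch.spnego.enabled");
  }

  public static void main(String[] args) {
    System.out.println("SPNEGO enabled: " + isSpnegoEnabled);
    // isSpnegoEnabled = true;  // would not compile: the blank final may
    //                          // only be assigned in the static block
  }
}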
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java
index f877d39369f..96f56a3cdb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java
@@ -24,7 +24,7 @@ import java.io.OutputStream;
  * A TeeOutputStream writes its output to multiple output streams.
  */
 public class TeeOutputStream extends OutputStream {
-  private OutputStream outs[];
+  private final OutputStream[] outs;
 
   public TeeOutputStream(OutputStream outs[]) {
     this.outs = outs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index 5d5c9866d95..b4fa791e741 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -39,7 +39,7 @@ import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class XmlEditsVisitor implements OfflineEditsVisitor {
-  private OutputStream out;
+  private final OutputStream out;
   private ContentHandler contentHandler;
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index 61c8714b8d1..c8033dd4f35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -69,7 +69,7 @@ final class FileDistributionCalculator {
   private final int steps;
   private final PrintWriter out;
 
-  private int[] distribution;
+  private final int[] distribution;
   private int totalFiles;
   private int totalDirectories;
   private int totalBlocks;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index fd0ddd9e0f7..b07a1a6eb7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -125,7 +125,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey;
 class ImageLoaderCurrent implements ImageLoader {
   protected final DateFormat dateFormat =
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
-  private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
+  private static final int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
       -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51, -52 };
   private int imageVersion = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
index b519faa15b4..d80fcf1e94c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
@@ -71,9 +71,9 @@ final class LsrPBImage {
   private final Configuration conf;
   private final PrintWriter out;
   private String[] stringTable;
-  private HashMap inodes = Maps.newHashMap();
-  private HashMap dirmap = Maps.newHashMap();
-  private ArrayList refList = Lists.newArrayList();
+  private final HashMap inodes = Maps.newHashMap();
+  private final HashMap dirmap = Maps.newHashMap();
+  private final ArrayList refList = Lists.newArrayList();
 
   public LsrPBImage(Configuration conf, PrintWriter out) {
     this.conf = conf;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
index 28bcf103ee6..4544d55e13b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public class NameDistributionVisitor extends TextWriterImageVisitor {
-  HashMap<String, Integer> counts = new HashMap<String, Integer>();
+  final HashMap<String, Integer> counts = new HashMap<String, Integer>();
 
   public NameDistributionVisitor(String filename, boolean printToScreen)
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java
index 292402b245f..b2626e9d181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java
@@ -53,7 +53,7 @@ public class BestEffortLongFile implements Closeable {
 
   private FileChannel ch = null;
 
-  private ByteBuffer buf = ByteBuffer.allocate(Long.SIZE/8);
+  private final ByteBuffer buf = ByteBuffer.allocate(Long.SIZE/8);
 
   public BestEffortLongFile(File file, long defaultVal) {
     this.file = file;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
index d00d4341b94..cf33ca3a453 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
@@ -26,8 +26,8 @@ import static org.apache.hadoop.util.Time.monotonicNow;
  * threads.
  */
 public class DataTransferThrottler {
-  private long period;          // period over which bw is imposed
-  private long periodExtension; // Max period over which bw accumulates.
+  private final long period;          // period over which bw is imposed
+  private final long periodExtension; // Max period over which bw accumulates.
   private long bytesPerPeriod;  // total number of bytes can be sent in each period
   private long curPeriodStart;  // current period starting time
   private long curReserve;      // remaining bytes can be sent in the period
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
index 6972a86037d..7332d34594e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
@@ -44,7 +44,7 @@ import com.google.common.annotations.VisibleForTesting;
 public class DirectBufferPool {
 
   // Essentially implement a multimap with weak values.
-  ConcurrentMap<Integer, Queue<WeakReference<ByteBuffer>>> buffersBySize =
+  final ConcurrentMap<Integer, Queue<WeakReference<ByteBuffer>>> buffersBySize =
       new ConcurrentHashMap<Integer, Queue<WeakReference<ByteBuffer>>>();
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
index 8c0fc931e97..ee301be6181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
@@ -92,7 +92,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
   private float maxLoadFactor;
   private float minLoadFactor;
-  private int expandMultiplier = 2;
+  private final int expandMultiplier = 2;
 
   private int expandThreshold;
   private int shrinkThreshold;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
index 5abc724e4c5..a0324083451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
@@ -88,7 +88,7 @@ public class XMLUtils {
     return false;
   }
 
-  private static int NUM_SLASH_POSITIONS = 4;
+  private static final int NUM_SLASH_POSITIONS = 4;
 
   private static String mangleCodePoint(int cp) {
     return String.format("\\%0" + NUM_SLASH_POSITIONS + "x;", cp);
@@ -200,7 +200,7 @@ public class XMLUtils {
    * file.
    */
   static public class Stanza {
-    private TreeMap<String, LinkedList<Stanza>> subtrees;
+    private final TreeMap<String, LinkedList<Stanza>> subtrees;
 
     /** The unmangled value of this stanza.
      */
     private String value;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
index b0f1d35270a..fd274b778a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
@@ -69,8 +69,8 @@ public abstract class ByteRangeInputStream extends FSInputStream {
     NORMAL, SEEK, CLOSED
   }
 
   protected InputStream in;
-  protected URLOpener originalURL;
-  protected URLOpener resolvedURL;
+  protected final URLOpener originalURL;
+  protected final URLOpener resolvedURL;
   protected long startPos = 0;
   protected long currentPos = 0;
   protected Long fileLength = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
index 07d0cd900cf..309de21eaa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
@@ -411,7 +411,7 @@ public class HftpFileSystem extends FileSystem
 
   /** Class to parse and store a listing reply from the server. */
   class LsParser extends DefaultHandler {
 
-    ArrayList<FileStatus> fslist = new ArrayList<FileStatus>();
+    final ArrayList<FileStatus> fslist = new ArrayList<FileStatus>();
 
     @Override
     public void startElement(String ns, String localname, String qname,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
index b335379b1bb..a78391cbe73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
@@ -32,7 +32,7 @@ public class AclPermissionParam extends StringParam {
   /** Default parameter value. */
   public static final String DEFAULT = "";
 
-  private static Domain DOMAIN = new Domain(NAME,
+  private static final Domain DOMAIN = new Domain(NAME,
       Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
 
   /**
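All of the hunks so far implement the same HDFS-6124 sweep: members that are assigned exactly once become final. Beyond catching accidental reassignment at compile time, final fields also carry safe-publication semantics under the Java Memory Model (JLS 17.5): any thread that obtains a reference to the object is guaranteed to observe the constructed values of its final fields without extra synchronization. A minimal sketch follows; the class below is hypothetical and not part of the patch:

// FinalFieldSketch.java -- illustrative only; not part of the patch.
import java.util.ArrayList;
import java.util.List;

public class FinalFieldSketch {
  // The reference is fixed at construction time. Under JLS 17.5, a thread
  // that sees this object also sees the fully initialized list.
  private final List<String> missingIds = new ArrayList<String>();

  void add(String blockId) {
    missingIds.add(blockId);   // the collection itself remains mutable
    // missingIds = null;      // compile-time error: cannot assign a final field
  }

  public static void main(String[] args) {
    FinalFieldSketch sketch = new FinalFieldSketch();
    sketch.add("blk_1");
    System.out.println(sketch.missingIds.size());   // prints 1
  }
}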
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index f0ebfcb37c8..fc7160620d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -117,7 +117,7 @@ public class TestEnhancedByteBufferAccess {
     return resultArray;
   }
 
-  private static int BLOCK_SIZE = 4096;
+  private static final int BLOCK_SIZE = 4096;
 
   public static HdfsConfiguration initZeroCopyTest() {
     Assume.assumeTrue(NativeIO.isAvailable());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
index e9d6a5c9ccb..5b758d37707 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
@@ -42,7 +42,7 @@ import org.junit.Test;
 
 public class TestFcHdfsSetUMask {
 
-  private static FileContextTestHelper fileContextTestHelper =
+  private static final FileContextTestHelper fileContextTestHelper =
       new FileContextTestHelper();
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
index 8a4e6e268ff..4219c344b24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
@@ -60,7 +60,7 @@ public class TestGlobPaths {
   static private FileContext unprivilegedFc;
   static final private int NUM_OF_PATHS = 4;
   static private String USER_DIR;
-  private Path[] path = new Path[NUM_OF_PATHS];
+  private final Path[] path = new Path[NUM_OF_PATHS];
 
   @BeforeClass
   public static void setUp() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index 5f63ec930d7..f87da6b8d76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -45,7 +45,7 @@ public class TestHDFSFileContextMainOperations extends
     FileContextMainOperationsBaseTest {
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
-  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static final HdfsConfiguration CONF = new HdfsConfiguration();
 
   @Override
   protected FileContextTestHelper createFileContextHelper() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
index a243fe959b2..e068cf95cbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
@@ -50,7 +50,7 @@ import org.junit.Test;
  * underlying file system as Hdfs.
  */
 public class TestResolveHdfsSymlink {
-  private static FileContextTestHelper helper = new FileContextTestHelper();
+  private static final FileContextTestHelper helper = new FileContextTestHelper();
   private static MiniDFSCluster cluster = null;
 
   @BeforeClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
index 0871f6edd9c..6fc97a2948d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
@@ -39,7 +39,7 @@ import org.junit.Test;
  */
 public class TestUrlStreamHandler {
 
-  private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class);
+  private static final File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class);
 
   /**
    * Test opening and reading from an InputStream through a hdfs:// URL.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
index 849bfa202de..9d0e31be520 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
@@ -49,9 +49,9 @@ import org.junit.Test;
 
 public class TestStickyBit {
 
-  static UserGroupInformation user1 =
+  static final UserGroupInformation user1 =
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
-  static UserGroupInformation user2 =
+  static final UserGroupInformation user2 =
     UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
 
   private static MiniDFSCluster cluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
index 72ce5325c9c..b6c8e699ffc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
@@ -40,7 +40,7 @@ import org.junit.BeforeClass;
 public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static Configuration CONF = new Configuration();
+  private static final Configuration CONF = new Configuration();
   private static FileSystem fHdfs;
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index 3fc4a567f4b..eb3ee333224 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -44,7 +44,7 @@
 public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
 
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
   private static Path defaultWorkingDirectory2;
-  private static Configuration CONF = new Configuration();
+  private static final Configuration CONF = new Configuration();
   private static FileSystem fHdfs;
   private static FileSystem fHdfs2;
   private FileSystem fsTarget2;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
index ac0a267f18f..504e192df3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
@@ -41,7 +41,7 @@ import org.junit.BeforeClass;
 public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static final HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
index 5db84315229..e3b4fe25fc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
@@ -55,7 +55,7 @@ public class TestViewFsFileStatusHdfs {
   private static final FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
-  private static Configuration CONF = new Configuration();
+  private static final Configuration CONF = new Configuration();
   private static FileSystem fHdfs;
   private static FileSystem vfs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
index f1da50647d9..e7d94fc5b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
@@ -37,7 +37,7 @@ import org.junit.BeforeClass;
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static final HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index b136a5e9910..6a3cc23d129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -86,8 +86,8 @@ public class DFSTestUtil {
 
   private static final Log LOG = LogFactory.getLog(DFSTestUtil.class);
 
-  private static Random gen = new Random();
-  private static String[] dirNames = {
+  private static final Random gen = new Random();
+  private static final String[] dirNames = {
     "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
   };
 
@@ -161,8 +161,8 @@ public class DFSTestUtil {
   private class MyFile {
 
     private String name = "";
-    private int size;
-    private long seed;
+    private final int size;
+    private final long seed;
 
     MyFile() {
       int nLevels = gen.nextInt(maxLevels);
@@ -1193,7 +1193,7 @@ public class DFSTestUtil {
     private final String testName;
     private final TemporarySocketDirectory sockDir;
     private boolean closed = false;
-    private boolean formerTcpReadsDisabled;
+    private final boolean formerTcpReadsDisabled;
 
     public ShortCircuitTestContext(String testName) {
       this.testName = testName;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6fe8ecbadca..4bc6bc5a146 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -395,11 +395,11 @@ public class MiniDFSCluster {
   }
 
   public class DataNodeProperties {
-    DataNode datanode;
-    Configuration conf;
+    final DataNode datanode;
+    final Configuration conf;
     String[] dnArgs;
-    SecureResources secureResources;
-    int ipcPort;
+    final SecureResources secureResources;
+    final int ipcPort;
 
     DataNodeProperties(DataNode node, Configuration conf, String[] args,
         SecureResources secureResources, int ipcPort) {
@@ -418,7 +418,7 @@ public class MiniDFSCluster {
   private Configuration conf;
   private NameNodeInfo[] nameNodes;
   protected int numDataNodes;
-  protected ArrayList<DataNodeProperties> dataNodes =
+  protected final ArrayList<DataNodeProperties> dataNodes =
     new ArrayList<DataNodeProperties>();
   private File base_dir;
   private File data_dir;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
index 4c1eff89a42..b919348744a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
@@ -208,7 +208,7 @@ public class MiniDFSNNTopology {
   }
 
   public static class NNConf {
-    private String nnId;
+    private final String nnId;
     private int httpPort;
     private int ipcPort;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 74c763d5aa7..8d9ede019f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -64,7 +64,7 @@ public class TestClientReportBadBlock {
   private static int buffersize;
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem dfs;
-  private static int numDataNodes = 3;
+  private static final int numDataNodes = 3;
   private static final Configuration conf = new HdfsConfiguration();
 
   Random rand = new Random();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index aacebce3595..7e6975735be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -66,7 +66,7 @@ public class TestDFSClientFailover {
   private static final Path TEST_FILE = new Path("/tmp/failover-test-file");
   private static final int FILE_LENGTH_TO_VERIFY = 100;
 
-  private Configuration conf = new Configuration();
+  private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
 
   @Before
@@ -136,7 +136,7 @@ public class TestDFSClientFailover {
 
   private static class InjectingSocketFactory extends StandardSocketFactory {
 
-    static SocketFactory defaultFactory = SocketFactory.getDefault();
+    static final SocketFactory defaultFactory = SocketFactory.getDefault();
 
     static int portToInjectOn;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index e8b652a8e07..a44982b6e3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -457,7 +457,7 @@ public class TestDFSClientRetries {
    */
   private static class FailNTimesAnswer implements Answer {
     private int failuresLeft;
-    private NamenodeProtocols realNN;
+    private final NamenodeProtocols realNN;
 
     public FailNTimesAnswer(NamenodeProtocols preSpyNN, int timesToFail) {
       failuresLeft = timesToFail;
@@ -670,13 +670,13 @@ public class TestDFSClientRetries {
   class DFSClientReader implements Runnable {
 
     DFSClient client;
-    Configuration conf;
-    byte[] expected_sha;
+    final Configuration conf;
+    final byte[] expected_sha;
     FileSystem fs;
-    Path filePath;
-    MiniDFSCluster cluster;
-    int len;
-    Counter counter;
+    final Path filePath;
+    final MiniDFSCluster cluster;
+    final int len;
+    final Counter counter;
 
     DFSClientReader(Path file, MiniDFSCluster cluster, byte[] hash_sha,
         int fileLen, Counter cnt) {
       filePath = file;
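The remaining hunks apply the same rule to test fixtures such as Configuration, Random, and Path. Making such a field final pins only the reference; the object itself stays mutable, which is why test setup code that mutates these fixtures keeps compiling unchanged. A short sketch using a JDK type as a stand-in; everything below is hypothetical and not part of the patch:

// FinalFixtureSketch.java -- illustrative only; not part of the patch.
import java.util.Properties;

public class FinalFixtureSketch {
  // 'final' forbids reassigning conf, not mutating it -- the reason the
  // test classes below can take 'final Configuration conf' with no other
  // changes to their bodies.
  private final Properties conf = new Properties();

  public static void main(String[] args) {
    FinalFixtureSketch sketch = new FinalFixtureSketch();
    sketch.conf.setProperty("dfs.replication", "3");   // mutation: fine
    // sketch.conf = new Properties();                 // would not compile
    System.out.println(sketch.conf.getProperty("dfs.replication"));
  }
}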
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
index cec0c594a71..e516d99f975 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
@@ -37,7 +37,7 @@ import org.junit.Test;
  * directories, and generally behaves as expected.
  */
 public class TestDFSMkdirs {
-  private Configuration conf = new HdfsConfiguration();
+  private final Configuration conf = new HdfsConfiguration();
 
   private static final String[] NON_CANONICAL_PATHS = new String[] {
     "//test1",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index b8d225df56e..b098f8666d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -56,10 +56,10 @@ public class TestDFSPermission {
   final private static String USER2_NAME = "user2";
   final private static String USER3_NAME = "user3";
 
-  private static UserGroupInformation SUPERUSER;
-  private static UserGroupInformation USER1;
-  private static UserGroupInformation USER2;
-  private static UserGroupInformation USER3;
+  private static final UserGroupInformation SUPERUSER;
+  private static final UserGroupInformation USER1;
+  private static final UserGroupInformation USER2;
+  private static final UserGroupInformation USER3;
 
   final private static short MAX_PERMISSION = 511;
   final private static short DEFAULT_UMASK = 022;
@@ -75,7 +75,7 @@ public class TestDFSPermission {
   private FileSystem fs;
   private MiniDFSCluster cluster;
 
-  private static Random r;
+  private static final Random r;
 
   static {
     try {
@@ -472,8 +472,8 @@ public class TestDFSPermission {
    * value is generated only once.
    */
   static private class PermissionGenerator {
-    private Random r;
-    private short permissions[] = new short[MAX_PERMISSION + 1];
+    private final Random r;
+    private final short[] permissions = new short[MAX_PERMISSION + 1];
     private int numLeft = MAX_PERMISSION + 1;
 
     PermissionGenerator(Random r) {
@@ -642,7 +642,7 @@ public class TestDFSPermission {
     }
   }
 
-  private CreatePermissionVerifier createVerifier =
+  private final CreatePermissionVerifier createVerifier =
     new CreatePermissionVerifier();
   /* test if the permission checking of create/mkdir is correct */
   private void testCreateMkdirs(UserGroupInformation ugi, Path path,
@@ -672,7 +672,7 @@ public class TestDFSPermission {
     }
   }
 
-  private OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
+  private final OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
   /* test if the permission checking of open is correct */
   private void testOpen(UserGroupInformation ugi, Path path,
       short ancestorPermission, short parentPermission, short filePermission)
@@ -697,7 +697,7 @@ public class TestDFSPermission {
     }
   }
 
-  private SetReplicationPermissionVerifier replicatorVerifier =
+  private final SetReplicationPermissionVerifier replicatorVerifier =
     new SetReplicationPermissionVerifier();
   /* test if the permission checking of setReplication is correct */
   private void testSetReplication(UserGroupInformation ugi, Path path,
@@ -725,7 +725,7 @@ public class TestDFSPermission {
     }
   }
 
-  private SetTimesPermissionVerifier timesVerifier =
+  private final SetTimesPermissionVerifier timesVerifier =
     new SetTimesPermissionVerifier();
   /* test if the permission checking of setReplication is correct */
   private void testSetTimes(UserGroupInformation ugi, Path path,
@@ -780,7 +780,7 @@ public class TestDFSPermission {
     }
   }
 
-  private StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
+  private final StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
   /* test if the permission checking of isDirectory, exist,
    * getFileInfo, getContentSummary is correct */
   private void testStats(UserGroupInformation ugi, Path path,
@@ -840,7 +840,7 @@ public class TestDFSPermission {
     }
   }
 
-  ListPermissionVerifier listVerifier = new ListPermissionVerifier();
+  final ListPermissionVerifier listVerifier = new ListPermissionVerifier();
   /* test if the permission checking of list is correct */
   private void testList(UserGroupInformation ugi, Path file, Path dir,
       short ancestorPermission, short parentPermission, short filePermission)
@@ -896,7 +896,7 @@ public class TestDFSPermission {
     }
   }
 
-  RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
+  final RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
   /* test if the permission checking of rename is correct */
   private void testRename(UserGroupInformation ugi, Path src, Path dst,
       short srcAncestorPermission, short srcParentPermission,
@@ -958,7 +958,7 @@ public class TestDFSPermission {
     }
   }
 
-  DeletePermissionVerifier fileDeletionVerifier =
+  final DeletePermissionVerifier fileDeletionVerifier =
     new DeletePermissionVerifier();
 
   /* test if the permission checking of file deletion is correct */
@@ -968,7 +968,7 @@ public class TestDFSPermission {
     fileDeletionVerifier.verifyPermission(ugi);
   }
 
-  DeleteDirPermissionVerifier dirDeletionVerifier =
+  final DeleteDirPermissionVerifier dirDeletionVerifier =
     new DeleteDirPermissionVerifier();
 
   /* test if the permission checking of directory deletion is correct */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 5b739824ee5..5c4aca7f29c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -63,7 +63,7 @@ import static org.junit.Assert.*;
  */
 public class TestDFSShell {
   private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
-  private static AtomicInteger counter = new AtomicInteger();
+  private static final AtomicInteger counter = new AtomicInteger();
 
   private final int SUCCESS = 0;
   private final int ERROR = 1;
@@ -1459,7 +1459,7 @@ public class TestDFSShell {
     TestGetRunner runner = new TestGetRunner() {
       private int count = 0;
-      private FsShell shell = new FsShell(conf);
+      private final FsShell shell = new FsShell(conf);
 
       public String run(int exitcode, String... options) throws IOException {
         String dst = TEST_ROOT_DIR + "/" + fname+ ++count;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
index c0392ac525d..c4a41b7549a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
@@ -77,7 +77,7 @@ public class TestDFSStorageStateRecovery {
    * 5) current directory should exist after recovery but before startup
    * 6) previous directory should exist after recovery but before startup
    */
-  static boolean[][] testCases = new boolean[][] {
+  static final boolean[][] testCases = new boolean[][] {
     new boolean[] {true,  false, false, false, true,  true,  false}, // 1
     new boolean[] {true,  true,  false, false, true,  true,  true }, // 2
     new boolean[] {true,  false, true,  false, true,  true,  true }, // 3
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 802c4a665de..4b87eb16769 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -65,7 +65,7 @@ public class TestDFSUpgradeFromImage {
 
   private static final Log LOG = LogFactory
       .getLog(TestDFSUpgradeFromImage.class);
-  private static File TEST_ROOT_DIR =
+  private static final File TEST_ROOT_DIR =
       new File(MiniDFSCluster.getBaseDirectory());
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
@@ -87,7 +87,7 @@ public class TestDFSUpgradeFromImage {
     }
   }
 
-  LinkedList refList = new LinkedList();
+  final LinkedList refList = new LinkedList();
   Iterator refIter;
 
   boolean printChecksum = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index dc01c56d255..132218c37a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -50,10 +50,10 @@ import org.junit.Test;
 import com.google.common.io.NullOutputStream;
 
 public class TestDataTransferKeepalive {
-  Configuration conf = new HdfsConfiguration();
+  final Configuration conf = new HdfsConfiguration();
   private MiniDFSCluster cluster;
   private DataNode dn;
-  private static Path TEST_FILE = new Path("/test");
+  private static final Path TEST_FILE = new Path("/test");
 
   private static final int KEEPALIVE_TIMEOUT = 1000;
   private static final int WRITE_TIMEOUT = 3000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 3583b2e17cb..3b7381e6216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -80,11 +80,11 @@ public class TestDataTransferProtocol {
   DatanodeID datanode;
   InetSocketAddress dnAddr;
 
-  ByteArrayOutputStream sendBuf = new ByteArrayOutputStream(128);
+  final ByteArrayOutputStream sendBuf = new ByteArrayOutputStream(128);
   final DataOutputStream sendOut = new DataOutputStream(sendBuf);
   final Sender sender = new Sender(sendOut);
-  ByteArrayOutputStream recvBuf = new ByteArrayOutputStream(128);
-  DataOutputStream recvOut = new DataOutputStream(recvBuf);
+  final ByteArrayOutputStream recvBuf = new ByteArrayOutputStream(128);
+  final DataOutputStream recvOut = new DataOutputStream(recvBuf);
 
   private void sendRecvData(String testDescription,
       boolean eofExpected) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
index ba4daa3e825..adb915417a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
@@ -61,10 +61,10 @@ public class TestDatanodeBlockScanner {
 
   private static final long TIMEOUT = 20000; // 20 sec.
 
-  private static Pattern pattern =
+  private static final Pattern pattern =
       Pattern.compile(".*?(blk_[-]*\\d+).*?scan time\\s*:\\s*(\\d+)");
 
-  private static Pattern pattern_blockVerify =
+  private static final Pattern pattern_blockVerify =
       Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
 
   static {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index 6c48cc76810..2504a269970 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -61,18 +61,18 @@ public class TestDatanodeDeath {
   static final int numDatanodes = 15;
   static final short replication = 3;
 
-  int numberOfFiles = 3;
-  int numThreads = 5;
+  final int numberOfFiles = 3;
+  final int numThreads = 5;
   Workload[] workload = null;
 
   //
   // an object that does a bunch of transactions
   //
   static class Workload extends Thread {
-    private short replication;
-    private int numberOfFiles;
-    private int id;
-    private FileSystem fs;
+    private final short replication;
+    private final int numberOfFiles;
+    private final int id;
+    private final FileSystem fs;
     private long stamp;
     private final long myseed;
@@ -221,8 +221,8 @@ public class TestDatanodeDeath {
    */
   class Modify extends Thread {
     volatile boolean running;
-    MiniDFSCluster cluster;
-    Configuration conf;
+    final MiniDFSCluster cluster;
+    final Configuration conf;
 
     Modify(Configuration conf, MiniDFSCluster cluster) {
       running = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index dfc3b226835..073179a495f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -66,7 +66,7 @@ public class TestDecommission {
   static final int BLOCKREPORT_INTERVAL_MSEC = 1000; //block report in msec
   static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
 
-  Random myrand = new Random();
+  final Random myrand = new Random();
   Path hostsFile;
   Path excludeFile;
   FileSystem localFileSys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
index 5fc567a2131..e7f3b9fc342 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
@@ -52,10 +52,10 @@ public class TestFSInputChecker {
   static final int HALF_CHUNK_SIZE = BYTES_PER_SUM/2;
   static final int FILE_SIZE = 2*BLOCK_SIZE-1;
   static final short NUM_OF_DATANODES = 2;
-  byte[] expected = new byte[FILE_SIZE];
+  final byte[] expected = new byte[FILE_SIZE];
   byte[] actual;
   FSDataInputStream stm;
-  Random rand = new Random(seed);
+  final Random rand = new Random(seed);
 
   /* create a file */
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
index 047f67f380a..9dcd449661b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
@@ -41,8 +41,8 @@ public class TestFSOutputSummer {
   private static final int HALF_CHUNK_SIZE = BYTES_PER_CHECKSUM/2;
   private static final int FILE_SIZE = 2*BLOCK_SIZE-1;
   private static final short NUM_OF_DATANODES = 2;
-  private byte[] expected = new byte[FILE_SIZE];
-  private byte[] actual = new byte[FILE_SIZE];
+  private final byte[] expected = new byte[FILE_SIZE];
+  private final byte[] actual = new byte[FILE_SIZE];
   private FileSystem fileSys;
 
   /* create a file, write all data at once */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
index e7935fb2a70..eab3c979442 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
@@ -35,7 +35,7 @@ import org.junit.Test;
 
 public class TestFetchImage {
 
-  private static File FETCHED_IMAGE_FILE = new File(
+  private static final File FETCHED_IMAGE_FILE = new File(
       System.getProperty("build.test.dir"), "fetched-image-dir");
   // Shamelessly stolen from NNStorage.
   private static final Pattern IMAGE_REGEX = Pattern.compile("fsimage_(\\d+)");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index e4015944692..41feabb2abb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -48,7 +48,7 @@ import org.junit.Test;
  * support HDFS appends.
  */
 public class TestFileAppend{
-  boolean simulatedStorage = false;
+  final boolean simulatedStorage = false;
 
   private static byte[] fileContents = null;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index d33052e4c6b..eecd23b7b79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -59,14 +59,14 @@ public class TestFileAppend2 {
   }
 
   static final int numBlocks = 5;
-  boolean simulatedStorage = false;
+  final boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
 
-  int numDatanodes = 6;
-  int numberOfFiles = 50;
-  int numThreads = 10;
-  int numAppendsPerThread = 20;
+  final int numDatanodes = 6;
+  final int numberOfFiles = 50;
+  final int numThreads = 10;
+  final int numAppendsPerThread = 20;
 /***
   int numberOfFiles = 1;
   int numThreads = 1;
@@ -233,8 +233,8 @@ public class TestFileAppend2 {
   // an object that does a bunch of appends to files
   //
   class Workload extends Thread {
-    private int id;
-    private MiniDFSCluster cluster;
+    private final int id;
+    private final MiniDFSCluster cluster;
 
     Workload(MiniDFSCluster cluster, int threadIndex) {
       id = threadIndex;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index a5ea27e568e..4e2a41021cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -64,7 +64,7 @@ public class TestFileAppend4 {
   MiniDFSCluster cluster;
   Path file1;
   FSDataOutputStream stm;
-  boolean simulatedStorage = false;
+  final boolean simulatedStorage = false;
 
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index bdf4c02d4b9..245652abde2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -35,7 +35,7 @@ import org.junit.Test;
 public class TestHdfsAdmin {
 
   private static final Path TEST_PATH = new Path("/test");
-  private Configuration conf = new Configuration();
+  private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
 
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index 15b2e60e7cd..d38e808698d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -48,11 +48,11 @@ import org.junit.Test;
 * This class tests the replication and injection of blocks of a DFS file for simulated storage.
*/ public class TestInjectionForSimulatedStorage { - private int checksumSize = 16; - private int blockSize = checksumSize*2; - private int numBlocks = 4; - private int filesize = blockSize*numBlocks; - private int numDataNodes = 4; + private final int checksumSize = 16; + private final int blockSize = checksumSize*2; + private final int numBlocks = 4; + private final int filesize = blockSize*numBlocks; + private final int numDataNodes = 4; private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java index ce35ed5b17b..c6bd7ba2848 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java @@ -53,7 +53,7 @@ import org.junit.Test; */ public class TestIsMethodSupported { private static MiniDFSCluster cluster = null; - private static HdfsConfiguration conf = new HdfsConfiguration(); + private static final HdfsConfiguration conf = new HdfsConfiguration(); private static InetSocketAddress nnAddress = null; private static InetSocketAddress dnAddress = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index ab561ad116b..68c9126e5fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -66,7 +66,7 @@ public class TestLease { static final String dirString = "/test/lease"; final Path dir = new Path(dirString); static final Log LOG = LogFactory.getLog(TestLease.class); - Configuration conf = new HdfsConfiguration(); + final Configuration conf = new HdfsConfiguration(); @Test public void testLeaseAbort() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java index 005786140cf..4d075bf5e5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java @@ -67,10 +67,10 @@ public class TestLeaseRecovery2 { static final private long BLOCK_SIZE = 1024; static final private int FILE_SIZE = (int)BLOCK_SIZE*2; static final short REPLICATION_NUM = (short)3; - static byte[] buffer = new byte[FILE_SIZE]; + static final byte[] buffer = new byte[FILE_SIZE]; - static private String fakeUsername = "fakeUser1"; - static private String fakeGroup = "supergroup"; + static private final String fakeUsername = "fakeUser1"; + static private final String fakeGroup = "supergroup"; static private MiniDFSCluster cluster; static private DistributedFileSystem dfs; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java index 6fcb0ed7b86..15aca1e7734 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java @@ -35,11 +35,11 @@ import org.mockito.stubbing.Answer; import com.google.common.base.Supplier; public class TestLeaseRenewer { - private String FAKE_AUTHORITY="hdfs://nn1/"; - private UserGroupInformation FAKE_UGI_A = + private final String FAKE_AUTHORITY="hdfs://nn1/"; + private final UserGroupInformation FAKE_UGI_A = UserGroupInformation.createUserForTesting( "myuser", new String[]{"group1"}); - private UserGroupInformation FAKE_UGI_B = + private final UserGroupInformation FAKE_UGI_B = UserGroupInformation.createUserForTesting( "myuser", new String[]{"group1"}); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListPathServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListPathServlet.java index 3f1a3e95b96..f3f6b2b23b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListPathServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListPathServlet.java @@ -50,8 +50,8 @@ public class TestListPathServlet { private static FileSystem fs; private static URI hftpURI; private static HftpFileSystem hftpFs; - private Random r = new Random(); - private List filelist = new ArrayList(); + private final Random r = new Random(); + private final List filelist = new ArrayList(); @BeforeClass public static void setup() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java index d10a1662932..b3a97b78315 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java @@ -42,7 +42,7 @@ public class TestPeerCache { private boolean closed = false; private final boolean hasDomain; - private DatanodeID dnId; + private final DatanodeID dnId; public FakePeer(DatanodeID dnId, boolean hasDomain) { this.dnId = dnId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java index 2c813a9582a..209a0d5599a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java @@ -46,14 +46,14 @@ import org.junit.Test; public class TestPipelines { public static final Log LOG = LogFactory.getLog(TestPipelines.class); - private static short REPL_FACTOR = 3; + private static final short REPL_FACTOR = 3; private static final int RAND_LIMIT = 2000; private static final int FILE_SIZE = 10000; private MiniDFSCluster cluster; private DistributedFileSystem fs; private static Configuration conf; - static Random rand = new Random(RAND_LIMIT); + static final Random rand = new Random(RAND_LIMIT); static { initLoggers(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitCache.java index f50b77d1364..1f53eec9250 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitCache.java @@ 
-69,8 +69,8 @@ public class TestShortCircuitCache { static final Log LOG = LogFactory.getLog(TestShortCircuitCache.class); private static class TestFileDescriptorPair { - TemporarySocketDirectory dir = new TemporarySocketDirectory(); - FileInputStream fis[]; + final TemporarySocketDirectory dir = new TemporarySocketDirectory(); + final FileInputStream[] fis; public TestFileDescriptorPair() throws IOException { fis = new FileInputStream[2]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java index 412212d28a5..52ef11cc344 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java @@ -89,7 +89,7 @@ public class TestShortCircuitLocalRead { static final long seed = 0xDEADBEEFL; static final int blockSize = 5120; - boolean simulatedStorage = false; + final boolean simulatedStorage = false; // creates a file but does not close it static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java index 7c440fdea0c..47a78bcc454 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java @@ -62,7 +62,7 @@ public class TestWriteRead { private boolean verboseOption = true; private boolean positionReadOption = false; private boolean truncateOption = false; - private boolean abortTestOnFailure = true; + private final boolean abortTestOnFailure = true; static private Log LOG = LogFactory.getLog(TestWriteRead.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index b4660c29eac..4f26e087cc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -67,10 +67,10 @@ import com.google.common.primitives.Bytes; public class UpgradeUtilities { // Root scratch directory on local filesystem - private static File TEST_ROOT_DIR = + private static final File TEST_ROOT_DIR = new File(MiniDFSCluster.getBaseDirectory()); // The singleton master storage directory for Namenode - private static File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster"); + private static final File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster"); // A checksum of the contents in namenodeStorage directory private static long namenodeStorageChecksum; // The namespaceId of the namenodeStorage directory @@ -82,7 +82,7 @@ public class UpgradeUtilities { // The fsscTime of the namenodeStorage directory private static long namenodeStorageFsscTime; // The singleton master storage directory for Datanode - private static File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster"); + private static final File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster"); // A checksum of the contents in datanodeStorage directory private static long datanodeStorageChecksum; // A checksum 
of the contents in blockpool storage directory diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java index 428a0384b77..6a87cdfa8d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java @@ -31,8 +31,8 @@ import static org.junit.Assert.*; public class TestPacketReceiver { - private static long OFFSET_IN_BLOCK = 12345L; - private static int SEQNO = 54321; + private static final long OFFSET_IN_BLOCK = 12345L; + private static final int SEQNO = 54321; private byte[] prepareFakePacket(byte[] data, byte[] sums) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java index a4f67e5f71c..202188d2b7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java @@ -41,7 +41,7 @@ public class MiniJournalCluster { private String baseDir; private int numJournalNodes = 3; private boolean format = true; - private Configuration conf; + private final Configuration conf; public Builder(Configuration conf) { this.conf = conf; @@ -69,8 +69,8 @@ public class MiniJournalCluster { private static final class JNInfo { private JournalNode node; - private InetSocketAddress ipcAddr; - private String httpServerURI; + private final InetSocketAddress ipcAddr; + private final String httpServerURI; private JNInfo(JournalNode node) { this.node = node; @@ -80,8 +80,8 @@ public class MiniJournalCluster { } private static final Log LOG = LogFactory.getLog(MiniJournalCluster.class); - private File baseDir; - private JNInfo nodes[]; + private final File baseDir; + private final JNInfo[] nodes; private MiniJournalCluster(Builder b) throws IOException { LOG.info("Starting MiniJournalCluster with " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java index 30a5ba6064d..e3b381c1f04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java @@ -38,7 +38,7 @@ public class MiniQJMHACluster { private MiniJournalCluster journalCluster; private final Configuration conf; - public static String NAMESERVICE = "ns1"; + public static final String NAMESERVICE = "ns1"; private static final String NN1 = "nn1"; private static final String NN2 = "nn2"; private static final int NN1_IPC_PORT = 10000; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java index 132ba748bad..6b2d1e930fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java @@ -42,10 +42,10 @@ import org.junit.Before; import org.junit.Test; public class TestNNWithQJM { - Configuration conf = new HdfsConfiguration(); + final Configuration conf = new HdfsConfiguration(); private MiniJournalCluster mjc = null; - private Path TEST_PATH = new Path("/test-dir"); - private Path TEST_PATH_2 = new Path("/test-dir"); + private final Path TEST_PATH = new Path("/test-dir"); + private final Path TEST_PATH_2 = new Path("/test-dir"); @Before public void resetSystemExit() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java index 41138d0eefa..bd9cf6f7ea3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java @@ -45,7 +45,7 @@ public class TestEpochsAreUnique { private static final String JID = "testEpochsAreUnique-jid"; private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( 12345, "mycluster", "my-bp", 0L); - private Random r = new Random(); + private final Random r = new Random(); @Test public void testSingleThreaded() throws IOException { @@ -121,7 +121,7 @@ public class TestEpochsAreUnique { } private class SometimesFaulty implements Answer> { - private float faultProbability; + private final float faultProbability; public SometimesFaulty(float faultProbability) { this.faultProbability = faultProbability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java index 89cb3d57459..ba51372222f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java @@ -46,7 +46,7 @@ public class TestIPCLoggerChannel { private static final Log LOG = LogFactory.getLog( TestIPCLoggerChannel.class); - private Configuration conf = new Configuration(); + private final Configuration conf = new Configuration(); private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( 12345, "mycluster", "my-bp", 0L); private static final String JID = "test-journalid"; @@ -54,7 +54,7 @@ public class TestIPCLoggerChannel { new InetSocketAddress(0); private static final byte[] FAKE_DATA = new byte[4096]; - private QJournalProtocol mockProxy = Mockito.mock(QJournalProtocol.class); + private final QJournalProtocol mockProxy = Mockito.mock(QJournalProtocol.class); private IPCLoggerChannel ch; private static final int LIMIT_QUEUE_SIZE_MB = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index 0e1ae01d6dc..4783e8fb4fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -75,7 +75,7 @@ public class TestQJMWithFaults { 
private static final int NUM_WRITER_ITERS = 500; private static final int SEGMENTS_PER_WRITER = 2; - private static Configuration conf = new Configuration(); + private static final Configuration conf = new Configuration(); static { @@ -87,7 +87,7 @@ public class TestQJMWithFaults { } // Set up fault injection mock. - private static JournalFaultInjector faultInjector = + private static final JournalFaultInjector faultInjector = JournalFaultInjector.instance = Mockito.mock(JournalFaultInjector.class); /** @@ -335,7 +335,7 @@ public class TestQJMWithFaults { private static class RandomFaultyChannel extends IPCLoggerChannel { private final Random random; - private float injectionProbability = 0.1f; + private final float injectionProbability = 0.1f; private boolean isUp = true; public RandomFaultyChannel(Configuration conf, NamespaceInfo nsInfo, @@ -389,7 +389,7 @@ public class TestQJMWithFaults { private static class InvocationCountingChannel extends IPCLoggerChannel { private int rpcCount = 0; - private Map> injections = Maps.newHashMap(); + private final Map> injections = Maps.newHashMap(); public InvocationCountingChannel(Configuration conf, NamespaceInfo nsInfo, String journalId, InetSocketAddress addr) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index e2d72e6b69b..fcb8e55bebd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -81,7 +81,7 @@ public class TestQuorumJournalManager { private QuorumJournalManager qjm; private List spies; - private List toClose = Lists.newLinkedList(); + private final List toClose = Lists.newLinkedList(); static { ((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java index 9248edb5770..ed0fc95d459 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java @@ -61,7 +61,7 @@ public class TestQuorumJournalManagerUnit { private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( 12345, "mycluster", "my-bp", 0L); - private Configuration conf = new Configuration(); + private final Configuration conf = new Configuration(); private List spyLoggers; private QuorumJournalManager qjm; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java index 3dddab8fc38..96ee7bd689e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java @@ -59,7 +59,7 @@ public class TestJournal { private static final File TEST_LOG_DIR = new File( new File(MiniDFSCluster.getBaseDirectory()), "TestJournal"); - 
private StorageErrorReporter mockErrorReporter = Mockito.mock( + private final StorageErrorReporter mockErrorReporter = Mockito.mock( StorageErrorReporter.class); private Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index 3f9f5942d6b..10b6b791175 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -62,11 +62,11 @@ public class TestJournalNode { private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( 12345, "mycluster", "my-bp", 0L); - private static File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class); + private static final File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class); private JournalNode jn; private Journal journal; - private Configuration conf = new Configuration(); + private final Configuration conf = new Configuration(); private IPCLoggerChannel ch; private String journalId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java index 1eaa0caea38..ef885280d73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java @@ -54,7 +54,7 @@ public class TestClientProtocolWithDelegationToken { public static final Log LOG = LogFactory .getLog(TestClientProtocolWithDelegationToken.class); - private static Configuration conf; + private static final Configuration conf; static { conf = new Configuration(); conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 6a9a071b43c..242934547d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -97,13 +97,13 @@ public class TestBlockToken { } /** Directory where we can count our open file descriptors under Linux */ - static File FD_DIR = new File("/proc/self/fd/"); + static final File FD_DIR = new File("/proc/self/fd/"); - long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins - long blockTokenLifetime = 2 * 60 * 1000; // 2 mins - ExtendedBlock block1 = new ExtendedBlock("0", 0L); - ExtendedBlock block2 = new ExtendedBlock("10", 10L); - ExtendedBlock block3 = new ExtendedBlock("-10", -108L); + final long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins + final long blockTokenLifetime = 2 * 60 * 1000; // 2 mins + final ExtendedBlock block1 = new ExtendedBlock("0", 0L); + final ExtendedBlock block2 = new ExtendedBlock("10", 10L); + final ExtendedBlock block3 = new ExtendedBlock("-10", -108L); @Before public void disableKerberos() { @@ -114,8 +114,8 @@ public class TestBlockToken { private static class 
GetLengthAnswer implements Answer { - BlockTokenSecretManager sm; - BlockTokenIdentifier ident; + final BlockTokenSecretManager sm; + final BlockTokenIdentifier ident; public GetLengthAnswer(BlockTokenSecretManager sm, BlockTokenIdentifier ident) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java index 28c5194904a..50d4eae9e7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java @@ -25,7 +25,7 @@ import org.junit.Test; public class TestBalancerWithEncryptedTransfer { - private Configuration conf = new HdfsConfiguration(); + private final Configuration conf = new HdfsConfiguration(); @Before public void setUpConf() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java index 0912ad90238..f646839dca3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java @@ -47,7 +47,7 @@ public class TestCorruptReplicaInfo { private static final Log LOG = LogFactory.getLog(TestCorruptReplicaInfo.class); - private Map block_map = + private final Map block_map = new HashMap(); // Allow easy block creation by block id diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java index 151d035135f..b4d89d7cb35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java @@ -28,7 +28,7 @@ import org.junit.Before; import org.junit.Test; public class TestHost2NodesMap { - private Host2NodesMap map = new Host2NodesMap(); + private final Host2NodesMap map = new Host2NodesMap(); private DatanodeDescriptor dataNodes[]; @Before diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index dbff77402ff..a17d32e6672 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -32,7 +32,7 @@ import com.google.common.base.Joiner; public class TestPendingDataNodeMessages { - PendingDataNodeMessages msgs = new PendingDataNodeMessages(); + final PendingDataNodeMessages msgs = new PendingDataNodeMessages(); private final Block block1Gs1 = new Block(1, 0, 1); private final Block block1Gs2 = new Block(1, 0, 2); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index e909dc9d800..d33efeeb8e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -54,7 +54,7 @@ import com.google.common.collect.Lists; * and then the under replicated block gets replicated to the datanode. */ public class TestRBWBlockInvalidation { - private static Log LOG = LogFactory.getLog(TestRBWBlockInvalidation.class); + private static final Log LOG = LogFactory.getLog(TestRBWBlockInvalidation.class); private static NumberReplicas countReplicas(final FSNamesystem namesystem, ExtendedBlock block) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 391e9ffa11f..aec32f16aaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -69,7 +69,7 @@ public class TestReplicationPolicy { ((Log4JLogger)BlockPlacementPolicy.LOG).getLogger().setLevel(Level.ALL); } - private Random random = DFSUtil.getRandom(); + private final Random random = DFSUtil.getRandom(); private static final int BLOCK_SIZE = 1024; private static final int NUM_OF_DATANODES = 6; private static NetworkTopology cluster; @@ -79,7 +79,7 @@ public class TestReplicationPolicy { private static DatanodeDescriptor dataNodes[]; private static DatanodeStorageInfo[] storages; // The interval for marking a datanode as stale, - private static long staleInterval = + private static final long staleInterval = DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT; @Rule diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java index 5bb388e4b88..9c955039d2d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java @@ -73,7 +73,7 @@ import org.xml.sax.SAXException; public class TestJspHelper { - private Configuration conf = new HdfsConfiguration(); + private final Configuration conf = new HdfsConfiguration(); private String jspWriterOutput = ""; public static class DummySecretManager extends diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index bf74b321c64..b18ff47e39b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -93,7 +93,7 @@ public abstract class BlockReportTestBase { protected 
MiniDFSCluster cluster; private DistributedFileSystem fs; - private static Random rand = new Random(RAND_LIMIT); + private static final Random rand = new Random(RAND_LIMIT); private static Configuration conf; @@ -832,7 +832,7 @@ public abstract class BlockReportTestBase { } private class BlockChecker extends Thread { - Path filePath; + final Path filePath; public BlockChecker(final Path filePath) { this.filePath = filePath; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 64774a2ac8c..8f98574a05a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -117,7 +117,7 @@ public class SimulatedFSDataset implements FsDatasetSpi { // information about a single block private class BInfo implements ReplicaInPipelineInterface { - Block theBlock; + final Block theBlock; private boolean finalized = false; // if not finalized => ongoing creation SimulatedOutputStream oStream = null; private long bytesAcked; @@ -329,7 +329,7 @@ public class SimulatedFSDataset implements FsDatasetSpi { * to {@link FSVolumeSet} */ private static class SimulatedStorage { - private Map map = + private final Map map = new HashMap(); private final long capacity; // in bytes @@ -827,7 +827,7 @@ public class SimulatedFSDataset implements FsDatasetSpi { byte theRepeatedData = 7; - long length; // bytes + final long length; // bytes int currentPos = 0; byte[] data = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index c1a376a195a..da4cedf9f4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -81,8 +81,8 @@ public class TestBPOfferService { private DatanodeProtocolClientSideTranslatorPB mockNN1; private DatanodeProtocolClientSideTranslatorPB mockNN2; - private NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2]; - private int heartbeatCounts[] = new int[2]; + private final NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2]; + private final int[] heartbeatCounts = new int[2]; private DataNode mockDn; private FsDatasetSpi mockFSDataset; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java index 0b28d554f11..dfe4209ec23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java @@ -49,7 +49,7 @@ import static org.junit.Assert.assertThat; public class TestBlockHasMultipleReplicasOnSameDN { public static final Log LOG = LogFactory.getLog(TestBlockHasMultipleReplicasOnSameDN.class); - private static short NUM_DATANODES = 2; + private static 
final short NUM_DATANODES = 2; private static final int BLOCK_SIZE = 1024; private static final long NUM_BLOCKS = 5; private static final long seed = 0x1BADF00DL; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java index e1bbd9a4d26..88d0c7dbcdd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java @@ -37,10 +37,10 @@ import org.mockito.stubbing.Answer; public class TestBlockPoolManager { - private Log LOG = LogFactory.getLog(TestBlockPoolManager.class); - private DataNode mockDN = Mockito.mock(DataNode.class); + private final Log LOG = LogFactory.getLog(TestBlockPoolManager.class); + private final DataNode mockDN = Mockito.mock(DataNode.class); private BlockPoolManager bpm; - private StringBuilder log = new StringBuilder(); + private final StringBuilder log = new StringBuilder(); private int mockIdx = 1; @Before diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java index c981e8c661b..faec990b2ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java @@ -36,7 +36,7 @@ import static org.junit.Assert.assertThat; public class TestBlockPoolSliceStorage { public static final Log LOG = LogFactory.getLog(TestBlockPoolSliceStorage.class); - Random rand = new Random(); + final Random rand = new Random(); BlockPoolSliceStorage storage; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java index fda2927efa6..73fdd73b171 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java @@ -45,8 +45,8 @@ import org.junit.Test; public class TestCachingStrategy { private static final Log LOG = LogFactory.getLog(TestCachingStrategy.class); - private static int MAX_TEST_FILE_LEN = 1024 * 1024; - private static int WRITE_PACKET_SIZE = DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; + private static final int MAX_TEST_FILE_LEN = 1024 * 1024; + private static final int WRITE_PACKET_SIZE = DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; private final static TestRecordingCacheTracker tracker = new TestRecordingCacheTracker(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java index 0faa5b1d05b..9d594966990 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java @@ -37,7 +37,7 @@ import org.junit.Test; * Tests if DataNode process exits if all Block Pool services exit. */ public class TestDataNodeExit { - private static long WAIT_TIME_IN_MILLIS = 10; + private static final long WAIT_TIME_IN_MILLIS = 10; Configuration conf; MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index f5b31847dc1..2e7a88f140e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -70,9 +70,9 @@ public class TestDataNodeVolumeFailure { final private int block_size = 512; MiniDFSCluster cluster = null; private Configuration conf; - int dn_num = 2; - int blocks_num = 30; - short repl=2; + final int dn_num = 2; + final int blocks_num = 30; + final short repl=2; File dataDir = null; File data_fail = null; File failedDir = null; @@ -83,7 +83,7 @@ public class TestDataNodeVolumeFailure { public int num_locs = 0; } // block id to BlockLocs - Map block_map = new HashMap (); + final Map block_map = new HashMap (); @Before public void setUp() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index f5b535d3943..d1a82ef5061 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -61,8 +61,8 @@ public class TestDirectoryScanner { private String bpid; private FsDatasetSpi fds = null; private DirectoryScanner scanner = null; - private Random rand = new Random(); - private Random r = new Random(); + private final Random rand = new Random(); + private final Random r = new Random(); static { CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index e3945f17087..2ccd7568c01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -104,7 +104,7 @@ public class TestFsDatasetCache { private static DataNode dn; private static FsDatasetSpi fsd; private static DatanodeProtocolClientSideTranslatorPB spyNN; - private static PageRounder rounder = new PageRounder(); + private static final PageRounder rounder = new PageRounder(); private static CacheManipulator prevCacheManipulator; static { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java index 0bc9a6490e4..d3d02448549 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java @@ -63,7 +63,7 @@ import org.junit.Test; public class TestIncrementalBrVariations { public static final Log LOG = LogFactory.getLog(TestIncrementalBrVariations.class); - private static short NUM_DATANODES = 1; + private static final short NUM_DATANODES = 1; static final int BLOCK_SIZE = 1024; static final int NUM_BLOCKS = 10; private static final long seed = 0xFACEFEEDL; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java index 143ff283367..55b1739de7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java @@ -44,8 +44,8 @@ public class TestMultipleNNDataBlockScanner { LogFactory.getLog(TestMultipleNNDataBlockScanner.class); Configuration conf; MiniDFSCluster cluster = null; - String bpids[] = new String[3]; - FileSystem fs[] = new FileSystem[3]; + final String[] bpids = new String[3]; + final FileSystem[] fs = new FileSystem[3]; public void setUp() throws IOException { conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java index 3815af5396a..e6bf0672d56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java @@ -62,8 +62,8 @@ public class TestReadOnlySharedStorage { public static final Log LOG = LogFactory.getLog(TestReadOnlySharedStorage.class); - private static short NUM_DATANODES = 3; - private static int RO_NODE_INDEX = 0; + private static final short NUM_DATANODES = 3; + private static final int RO_NODE_INDEX = 0; private static final int BLOCK_SIZE = 1024; private static final long seed = 0x1BADF00DL; private static final Path PATH = new Path("/" + TestReadOnlySharedStorage.class.getName() + ".dat"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index b3719ad63bb..ee610590cc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -39,10 +39,10 @@ import com.google.common.collect.Sets; * Tests datanode refresh namenode list functionality. 
 */
 public class TestRefreshNamenodes {
-  private int nnPort1 = 2221;
-  private int nnPort2 = 2224;
-  private int nnPort3 = 2227;
-  private int nnPort4 = 2230;
+  private final int nnPort1 = 2221;
+  private final int nnPort2 = 2224;
+  private final int nnPort3 = 2227;
+  private final int nnPort4 = 2230;
   @Test
   public void testRefreshNamenodes() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
index b0c89d9397c..85afa274a88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
@@ -45,7 +45,7 @@ import static org.mockito.Matchers.anyLong;
 public class TestStorageReport {
   public static final Log LOG = LogFactory.getLog(TestStorageReport.class);
-  private static short REPL_FACTOR = 1;
+  private static final short REPL_FACTOR = 1;
   private static final StorageType storageType = StorageType.SSD; // pick non-default.
   private static Configuration conf;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index ff2c9894086..65a51761c9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -69,7 +69,7 @@ public class TestInterDatanodeProtocol {
   private static final String ADDRESS = "0.0.0.0";
   final static private int PING_INTERVAL = 1000;
   final static private int MIN_SLEEP_TIME = 1000;
-  private static Configuration conf = new HdfsConfiguration();
+  private static final Configuration conf = new HdfsConfiguration();
   private static class TestServer extends Server {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index db18e2743f9..3ca924554d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -114,7 +114,7 @@ public class CreateEditsLog {
         startingBlockId + " to " + (currentBlockId-1));
   }
-  static String usage = "Usage: createditlogs " +
+  static final String usage = "Usage: createditlogs " +
       " -f numFiles startingBlockIds NumBlocksPerFile [-r replicafactor] " +
       "[-d editsLogDirectory]\n" +
       " Default replication factor is 1\n" +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java
index 8f6d51d81b8..6f7087b9215 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java
@@ -32,10 +32,10 @@ import java.util.Arrays;
 public class FileNameGenerator {
   private static final int DEFAULT_FILES_PER_DIRECTORY = 32;
-  private int[] pathIndecies = new int[20]; // this will support up to 32**20 = 2**100 = 10**30 files
-  private String baseDir;
+  private final int[] pathIndecies = new int[20]; // this will support up to 32**20 = 2**100 = 10**30 files
+  private final String baseDir;
   private String currentDir;
-  private int filesPerDirectory;
+  private final int filesPerDirectory;
   private long fileCount;
   FileNameGenerator(String baseDir) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index b32aecdb6a5..a79196c395b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -162,7 +162,7 @@ public class NNThroughputBenchmark implements Tool {
     protected static final String OP_ALL_NAME = "all";
     protected static final String OP_ALL_USAGE = "-op all ";
-    protected String baseDir;
+    protected final String baseDir;
     protected short replication;
     protected int numThreads = 0;       // number of threads
     protected int numOpsRequired = 0;   // number of operations requested
@@ -380,12 +380,12 @@ public class NNThroughputBenchmark implements Tool {
    * One of the threads that perform stats operations.
    */
   private class StatsDaemon extends Thread {
-    private int daemonId;
+    private final int daemonId;
     private int opsPerThread;
     private String arg1;      // argument passed to executeOp()
     private volatile int localNumOpsExecuted = 0;
     private volatile long localCumulativeTime = 0;
-    private OperationStatsBase statsOp;
+    private final OperationStatsBase statsOp;
     StatsDaemon(int daemonId, int nrOps, OperationStatsBase op) {
       this.daemonId = daemonId;
@@ -890,10 +890,10 @@ public class NNThroughputBenchmark implements Tool {
     NamespaceInfo nsInfo;
     DatanodeRegistration dnRegistration;
     DatanodeStorage storage; //only one storage
-    ArrayList blocks;
+    final ArrayList blocks;
     int nrBlocks; // actual number of blocks
     long[] blockReportList;
-    int dnIdx;
+    final int dnIdx;
     /**
      * Return a a 6 digit integer port.
@@ -1222,7 +1222,7 @@ public class NNThroughputBenchmark implements Tool {
       "-op replication [-datanodes T] [-nodesToDecommission D] " +
       "[-nodeReplicationLimit C] [-totalBlocks B] [-replication R]";
-    private BlockReportStats blockReportObject;
+    private final BlockReportStats blockReportObject;
     private int numDatanodes;
     private int nodesToDecommission;
     private int nodeReplicationLimit;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index c6364b174d0..d2d24a01ad7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -48,9 +48,9 @@ public class OfflineEditsViewerHelper {
   private static final Log LOG =
     LogFactory.getLog(OfflineEditsViewerHelper.class);
-  long blockSize = 512;
+  final long blockSize = 512;
   MiniDFSCluster cluster = null;
-  Configuration config = new Configuration();
+  final Configuration config = new Configuration();
   /**
    * Generates edits with all op codes and returns the edits filename
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 2591aee7933..969ccc6edbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -68,7 +68,7 @@ import org.junit.runners.Parameterized.Parameters;
 @RunWith(Parameterized.class)
 public class TestAuditLogs {
   static final String auditLogFile = PathUtils.getTestDirName(TestAuditLogs.class) + "/TestAuditLogs-audit.log";
-  boolean useAsyncLog;
+  final boolean useAsyncLog;
   @Parameters
   public static Collection data() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index c02be0b4bbb..d778c7fbddb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -121,7 +121,7 @@ public class TestCheckpoint {
   static final int numDatanodes = 3;
   short replication = 3;
-  static FilenameFilter tmpEditsFilter = new FilenameFilter() {
+  static final FilenameFilter tmpEditsFilter = new FilenameFilter() {
     @Override
     public boolean accept(File dir, String name) {
       return name.startsWith(NameNodeFile.EDITS_TMP.getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 0138070cd25..cb3b3311791 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -59,7 +59,7 @@ public class TestDecommissioningStatus
 {
   private static Configuration conf;
   private static Path dir;
-  ArrayList decommissionedNodes = new ArrayList(numDatanodes);
+  final ArrayList decommissionedNodes = new ArrayList(numDatanodes);
   @BeforeClass
   public static void setUp() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 96ee2a092d6..5a7c1f7a236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -173,11 +173,11 @@ public class TestEditLog {
   //
   // an object that does a bunch of transactions
   //
   static class Transactions implements Runnable {
-    FSNamesystem namesystem;
-    int numTransactions;
-    short replication = 3;
-    long blockSize = 64;
-    int startIndex;
+    final FSNamesystem namesystem;
+    final int numTransactions;
+    final short replication = 3;
+    final long blockSize = 64;
+    final int startIndex;
     Transactions(FSNamesystem ns, int numTx, int startIdx) {
       namesystem = ns;
@@ -848,8 +848,8 @@ public class TestEditLog {
   private static class EditLogByteInputStream extends EditLogInputStream {
-    private InputStream input;
-    private long len;
+    private final InputStream input;
+    private final long len;
     private int version;
     private FSEditLogOp.Reader reader = null;
     private FSEditLogLoader.PositionTrackingInputStream tracker = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index c68b7dc1b31..479ee9fe452 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -82,7 +82,7 @@ public class TestEditLogRace {
    */
   static final int NUM_SAVE_IMAGE = 30;
-  private List workers = new ArrayList();
+  private final List workers = new ArrayList();
   private static final int NUM_DATA_NODES = 1;
@@ -99,12 +99,12 @@ public class TestEditLogRace {
   //
   // an object that does a bunch of transactions
   //
   static class Transactions implements Runnable {
-    FSNamesystem namesystem;
+    final FSNamesystem namesystem;
     short replication = 3;
     long blockSize = 64;
     volatile boolean stopped = false;
     volatile Thread thr;
-    AtomicReference caught;
+    final AtomicReference caught;
     Transactions(FSNamesystem ns, AtomicReference caught) {
       namesystem = ns;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index f3cbf15aae2..044de85b0b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -68,7 +68,7 @@ public class TestFSImageWithSnapshot {
   static final long txid = 1;
   private final Path dir = new Path("/TestSnapshot");
-  private static String testDir =
+  private static final String testDir =
       System.getProperty("test.build.data", "build/test/data");
   Configuration conf;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index 9fd9456d143..8cf68a7a36f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -49,7 +49,7 @@ public class TestFsLimits {
   static FSDirectory fs;
   static boolean fsIsReady;
-  static PermissionStatus perms
+  static final PermissionStatus perms
       = new PermissionStatus("admin", "admin", FsPermission.getDefault());
   static private FSImage getMockFSImage() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index 5ee41975fc0..91c0513cb52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -55,10 +55,10 @@ public class TestHDFSConcat {
   private NamenodeProtocols nn;
   private DistributedFileSystem dfs;
-  private static long blockSize = 512;
+  private static final long blockSize = 512;
-  private static Configuration conf;
+  private static final Configuration conf;
   static {
     conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 805f7c87ca5..5c906b4ff43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -28,7 +28,7 @@ import org.mockito.Mockito;
 public class TestLeaseManager {
-  Configuration conf = new HdfsConfiguration();
+  final Configuration conf = new HdfsConfiguration();
   @Test
   public void testRemoveLeaseWithPrefixPath() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index d635df0c658..5c9133f34a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -52,7 +52,7 @@ import org.junit.Test;
  * blocks/files are also returned.
  */
 public class TestListCorruptFileBlocks {
-  static Log LOG = NameNode.stateChangeLog;
+  static final Log LOG = NameNode.stateChangeLog;
   /** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
   @Test (timeout=300000)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index 11665efa7e2..dfd878e59e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -47,9 +47,9 @@ import com.google.common.base.Joiner;
  */
 public class TestNNStorageRetentionFunctional {
-  private static File TEST_ROOT_DIR =
+  private static final File TEST_ROOT_DIR =
       new File(MiniDFSCluster.getBaseDirectory());
-  private static Log LOG = LogFactory.getLog(
+  private static final Log LOG = LogFactory.getLog(
       TestNNStorageRetentionFunctional.class);
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
index f0f25aa2351..b3acc7532bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
@@ -51,7 +51,7 @@ import com.google.common.collect.Sets;
 public class TestNNStorageRetentionManager {
-  Configuration conf = new Configuration();
+  final Configuration conf = new Configuration();
   /**
    * For the purpose of this test, purge as many edits as we can
@@ -268,13 +268,13 @@ public class TestNNStorageRetentionManager {
   }
   private class TestCaseDescription {
-    private Map dirRoots = Maps.newLinkedHashMap();
-    private Set expectedPurgedLogs = Sets.newLinkedHashSet();
-    private Set expectedPurgedImages = Sets.newLinkedHashSet();
+    private final Map dirRoots = Maps.newLinkedHashMap();
+    private final Set expectedPurgedLogs = Sets.newLinkedHashSet();
+    private final Set expectedPurgedImages = Sets.newLinkedHashSet();
     private class FakeRoot {
-      NameNodeDirType type;
-      List files;
+      final NameNodeDirType type;
+      final List files;
       FakeRoot(NameNodeDirType type) {
         this.type = type;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 0ca2f849c54..bc2c9d3dc5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -62,7 +62,7 @@ public class TestNameEditsConfigs {
   static final String FILE_EDITS = "current/edits";
   short replication = 3;
-  private File base_dir = new File(
+  private final File base_dir = new File(
       PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
index 19541243e04..d35ab84184e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
@@ -272,7 +272,7 @@ public class TestNameNodeJspHelper {
     LIVE("[Live Datanodes(| +):(| +)]\\d"),
     DEAD("[Dead Datanodes(| +):(| +)]\\d");
-    private Pattern pattern;
+    private final Pattern pattern;
     public Pattern getPattern() {
       return pattern;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 605fecee0a5..c1ef0e7a6c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -57,7 +57,7 @@ import com.google.common.collect.Sets;
  */
 public class TestNameNodeRecovery {
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
-  private static StartupOption recoverStartOpt = StartupOption.RECOVER;
+  private static final StartupOption recoverStartOpt = StartupOption.RECOVER;
   private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class);
   static {
@@ -205,7 +205,7 @@ public class TestNameNodeRecovery {
    * throwing an exception.
    */
   private static class EltsTestEmptyLog extends EditLogTestSetup {
-    private int paddingLength;
+    private final int paddingLength;
     public EltsTestEmptyLog(int paddingLength) {
       this.paddingLength = paddingLength;
@@ -289,7 +289,7 @@ public class TestNameNodeRecovery {
    * with recovery mode.
    */
   private static class EltsTestOpcodesAfterPadding extends EditLogTestSetup {
-    private int paddingLength;
+    private final int paddingLength;
     public EltsTestOpcodesAfterPadding(int paddingLength) {
       this.paddingLength = paddingLength;
@@ -448,7 +448,7 @@ public class TestNameNodeRecovery {
   }
   static class SafePaddingCorruptor implements Corruptor {
-    private byte padByte;
+    private final byte padByte;
     public SafePaddingCorruptor(byte padByte) {
       this.padByte = padByte;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
index 19d1234b40b..e5d059e3a5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
@@ -49,7 +49,7 @@ public class TestNameNodeRetryCacheMetrics {
   private MiniDFSCluster cluster;
   private FSNamesystem namesystem;
   private DistributedFileSystem filesystem;
-  private int namenodeId = 0;
+  private final int namenodeId = 0;
   private Configuration conf;
   private RetryCacheMetrics metrics;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index fe320d3ba5d..5e1c1925ee7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -78,7 +78,7 @@ public class TestNamenodeRetryCache {
   private static final byte[] CLIENT_ID = ClientId.getClientId();
   private static MiniDFSCluster cluster;
   private static FSNamesystem namesystem;
-  private static PermissionStatus perm = new PermissionStatus(
+  private static final PermissionStatus perm = new PermissionStatus(
       "TestNamenodeRetryCache", null, FsPermission.getDefault());
   private static DistributedFileSystem filesystem;
   private static int callId = 100;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
index b234d6127aa..8a5b8c584fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
@@ -35,7 +35,7 @@ public class TestSecondaryWebUi {
   private static MiniDFSCluster cluster;
   private static SecondaryNameNode snn;
-  private static Configuration conf = new Configuration();
+  private static final Configuration conf = new Configuration();
   @BeforeClass
   public static void setUpCluster() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
index f36c367619e..9352b4a6a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
@@ -62,8 +62,8 @@ public class TestSecurityTokenEditLog {
   //
   // an object that does a bunch of transactions
   //
   static class Transactions implements Runnable {
-    FSNamesystem namesystem;
-    int numTransactions;
+    final FSNamesystem namesystem;
+    final int numTransactions;
     short replication = 3;
     long blockSize = 64;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
index daaa6d862f7..f24b801aa41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
@@ -80,16 +80,16 @@ class MockFSInputStream extends FSInputStream {
 public class TestStreamFile {
-  private HdfsConfiguration CONF = new HdfsConfiguration();
-  private DFSClient clientMock = Mockito.mock(DFSClient.class);
-  private HttpServletRequest mockHttpServletRequest =
+  private final HdfsConfiguration CONF = new HdfsConfiguration();
+  private final DFSClient clientMock = Mockito.mock(DFSClient.class);
+  private final HttpServletRequest mockHttpServletRequest =
       Mockito.mock(HttpServletRequest.class);
-  private HttpServletResponse mockHttpServletResponse =
+  private final HttpServletResponse mockHttpServletResponse =
       Mockito.mock(HttpServletResponse.class);
   private final ServletContext mockServletContext =
       Mockito.mock(ServletContext.class);
-  StreamFile sfile = new StreamFile() {
+  final StreamFile sfile = new StreamFile() {
     private static final long serialVersionUID = -5513776238875189473L;
     @Override
@@ -298,7 +298,7 @@ public class TestStreamFile {
   }
   public static class ServletOutputStreamExtn extends ServletOutputStream {
-    private StringBuffer buffer = new StringBuffer(3);
+    private final StringBuffer buffer = new StringBuffer(3);
     public String getResult() {
       return buffer.toString();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
index 81c45f37894..048e9213f64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HAStressTestHarness.java
@@ -38,10 +38,10 @@ import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
  * processing, etc.
  */
 public class HAStressTestHarness {
-  Configuration conf;
+  final Configuration conf;
   private MiniDFSCluster cluster;
   static final int BLOCK_SIZE = 1024;
-  TestContext testCtx = new TestContext();
+  final TestContext testCtx = new TestContext();
   public HAStressTestHarness() {
     conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index b8fe2c1a720..c57efa9cf1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -49,7 +49,7 @@ import com.google.common.base.Supplier;
  * Static utility functions useful for testing HA.
  */
 public abstract class HATestUtil {
-  private static Log LOG = LogFactory.getLog(HATestUtil.class);
+  private static final Log LOG = LogFactory.getLog(HATestUtil.class);
   private static final String LOGICAL_HOSTNAME = "ha-nn-uri-%d";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index c90988233ca..209084006ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -72,7 +72,7 @@ import com.google.common.base.Joiner;
  * See HDFS-2904 for more info.
  **/
 public class TestDelegationTokensWithHA {
-  private static Configuration conf = new Configuration();
+  private static final Configuration conf = new Configuration();
   private static final Log LOG =
     LogFactory.getLog(TestDelegationTokensWithHA.class);
   private static MiniDFSCluster cluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
index 3251742b3ad..ec872505d5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
@@ -44,7 +44,7 @@ import org.mockito.Mockito;
  */
 public class TestHAConfiguration {
-  private FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+  private final FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
   @Test
   public void testCheckpointerValidityChecks() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index df2238838db..6a7734dc3af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -92,7 +92,7 @@ public class TestRetryCacheWithHA {
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
-  private Configuration conf = new HdfsConfiguration();
+  private final Configuration conf = new HdfsConfiguration();
   /**
    * A dummy invocation handler extending RetryInvocationHandler. We can use
@@ -100,7 +100,7 @@
    */
   private static class DummyRetryInvocationHandler extends
       RetryInvocationHandler {
-    static AtomicBoolean block = new AtomicBoolean(false);
+    static final AtomicBoolean block = new AtomicBoolean(false);
     DummyRetryInvocationHandler(
         FailoverProxyProvider proxyProvider,
@@ -226,8 +226,8 @@ public class TestRetryCacheWithHA {
   /** createSnapshot operaiton */
   class CreateSnapshotOp extends AtMostOnceOp {
     private String snapshotPath;
-    private String dir;
-    private String snapshotName;
+    private final String dir;
+    private final String snapshotName;
     CreateSnapshotOp(DFSClient client, String dir, String snapshotName) {
       super("createSnapshot", client);
@@ -269,8 +269,8 @@ public class TestRetryCacheWithHA {
   /** deleteSnapshot */
   class DeleteSnapshotOp extends AtMostOnceOp {
-    private String dir;
-    private String snapshotName;
+    private final String dir;
+    private final String snapshotName;
     DeleteSnapshotOp(DFSClient client, String dir, String snapshotName) {
       super("deleteSnapshot", client);
@@ -317,9 +317,9 @@ public class TestRetryCacheWithHA {
   /** renameSnapshot */
   class RenameSnapshotOp extends AtMostOnceOp {
-    private String dir;
-    private String oldName;
-    private String newName;
+    private final String dir;
+    private final String oldName;
+    private final String newName;
     RenameSnapshotOp(DFSClient client, String dir, String oldName,
         String newName) {
@@ -368,7 +368,7 @@ public class TestRetryCacheWithHA {
   /** create file operation (without OverWrite) */
   class CreateOp extends AtMostOnceOp {
-    private String fileName;
+    private final String fileName;
     private HdfsFileStatus status;
     CreateOp(DFSClient client, String fileName) {
@@ -416,7 +416,7 @@ public class TestRetryCacheWithHA {
   /** append operation */
   class AppendOp extends AtMostOnceOp {
-    private String fileName;
+    private final String fileName;
     private LocatedBlock lbk;
     AppendOp(DFSClient client, String fileName) {
@@ -460,8 +460,8 @@ public class TestRetryCacheWithHA {
   /** rename */
   class RenameOp extends AtMostOnceOp {
-    private String oldName;
-    private String newName;
+    private final String oldName;
+    private final String newName;
     private boolean renamed;
     RenameOp(DFSClient client, String oldName, String newName) {
@@ -503,8 +503,8 @@ public class TestRetryCacheWithHA {
   /** rename2 */
   class Rename2Op extends AtMostOnceOp {
-    private String oldName;
-    private String newName;
+    private final String oldName;
+    private final String newName;
     Rename2Op(DFSClient client, String oldName, String newName) {
       super("rename2", client);
@@ -544,9 +544,9 @@ public class TestRetryCacheWithHA {
   /** concat */
   class ConcatOp extends AtMostOnceOp {
-    private String target;
-    private String[] srcs;
-    private Path[] srcPaths;
+    private final String target;
+    private final String[] srcs;
+    private final Path[] srcPaths;
     ConcatOp(DFSClient client, Path target, int numSrc) {
       super("concat", client);
@@ -596,7 +596,7 @@ public class TestRetryCacheWithHA {
   /** delete */
   class DeleteOp extends AtMostOnceOp {
-    private String target;
+    private final String target;
     private boolean deleted;
     DeleteOp(DFSClient client, String target) {
@@ -636,8 +636,8 @@ public class TestRetryCacheWithHA {
   /** createSymlink */
   class CreateSymlinkOp extends AtMostOnceOp {
-    private String target;
-    private String link;
+    private final String target;
+    private final String link;
     public CreateSymlinkOp(DFSClient client, String target, String link) {
       super("createSymlink", client);
@@ -681,7 +681,7 @@ public class TestRetryCacheWithHA {
   /** updatePipeline */
   class UpdatePipelineOp extends AtMostOnceOp {
-    private String file;
+    private final String file;
     private ExtendedBlock oldBlock;
     private ExtendedBlock newBlock;
     private DatanodeInfo[] nodes;
@@ -750,7 +750,7 @@ public class TestRetryCacheWithHA {
   /** addCacheDirective */
   class AddCacheDirectiveInfoOp extends AtMostOnceOp {
-    private CacheDirectiveInfo directive;
+    private final CacheDirectiveInfo directive;
     private Long result;
     AddCacheDirectiveInfoOp(DFSClient client,
@@ -849,7 +849,7 @@ public class TestRetryCacheWithHA {
   /** removeCacheDirective */
   class RemoveCacheDirectiveInfoOp extends AtMostOnceOp {
-    private CacheDirectiveInfo directive;
+    private final CacheDirectiveInfo directive;
     private long id;
     RemoveCacheDirectiveInfoOp(DFSClient client, String pool,
@@ -897,7 +897,7 @@ public class TestRetryCacheWithHA {
   /** addCachePool */
   class AddCachePoolOp extends AtMostOnceOp {
-    private String pool;
+    private final String pool;
     AddCachePoolOp(DFSClient client, String pool) {
       super("addCachePool", client);
@@ -933,7 +933,7 @@ public class TestRetryCacheWithHA {
   /** modifyCachePool */
   class ModifyCachePoolOp extends AtMostOnceOp {
-    String pool;
+    final String pool;
     ModifyCachePoolOp(DFSClient client, String pool) {
       super("modifyCachePool", client);
@@ -970,7 +970,7 @@ public class TestRetryCacheWithHA {
   /** removeCachePool */
   class RemoveCachePoolOp extends AtMostOnceOp {
-    private String pool;
+    private final String pool;
     RemoveCachePoolOp(DFSClient client, String pool) {
       super("removeCachePool", client);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
index c029a673ad3..88ec5962e49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
@@ -50,7 +50,7 @@ public class TestNNMetricFilesInGetListingOps {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
-  private Random rand = new Random();
+  private final Random rand = new Random();
   @Before
   public void setUp() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index d1ee008d69f..b18df9b7e59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -91,7 +91,7 @@ public class TestNameNodeMetrics {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
-  private Random rand = new Random();
+  private final Random rand = new Random();
   private FSNamesystem namesystem;
   private BlockManager bm;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
index aab8605bf13..3b8345296bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
@@ -38,7 +38,7 @@ import org.junit.Test;
 public class TestCheckpointsWithSnapshots {
   private static final Path TEST_PATH = new Path("/foo");
-  private static Configuration conf = new HdfsConfiguration();
+  private static final Configuration conf = new HdfsConfiguration();
   static {
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 903d7158544..69ab04ed0a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -59,12 +59,12 @@ public class TestNestedSnapshots {
   }
   private static final long SEED = 0;
-  private static Random RANDOM = new Random(SEED);
+  private static final Random RANDOM = new Random(SEED);
   private static final short REPLICATION = 3;
   private static final long BLOCKSIZE = 1024;
-  private static Configuration conf = new Configuration();
+  private static final Configuration conf = new Configuration();
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index a5a4d18d640..97121f616bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -38,7 +38,7 @@ import org.junit.Before;
 import org.junit.Test;
 public class TestOpenFilesWithSnapshot {
-  private Configuration conf = new Configuration();
+  private final Configuration conf = new Configuration();
   MiniDFSCluster cluster = null;
   DistributedFileSystem fs = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index d4e887949e0..72f8cbe55da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -87,12 +87,12 @@ public class TestRenameWithSnapshots {
   private static final short REPL_2 = 1;
   private static final long BLOCKSIZE = 1024;
-  private static Configuration conf = new Configuration();
+  private static final Configuration conf = new Configuration();
   private static MiniDFSCluster cluster;
   private static FSNamesystem fsn;
   private static FSDirectory fsdir;
   private static DistributedFileSystem hdfs;
-  private static String testDir =
+  private static final String testDir =
       System.getProperty("test.build.data", "build/test/data");
   static private final Path dir = new Path("/testRenameWithSnapshots");
   static private final Path sub1 = new Path(dir, "sub1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 1b7e356fcbe..4a871642542 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -98,7 +98,7 @@ public class TestSnapshot {
   protected static FSDirectory fsdir;
   protected DistributedFileSystem hdfs;
-  private static String testDir =
+  private static final String testDir =
       System.getProperty("test.build.data", "build/test/data");
   @Rule
@@ -108,7 +108,7 @@ public class TestSnapshot {
    * The list recording all previous snapshots. Each element in the array
    * records a snapshot root.
    */
-  protected static ArrayList snapshotList = new ArrayList();
+  protected static final ArrayList snapshotList = new ArrayList();
   /**
    * Check {@link SnapshotTestHelper.TestDirectoryTree}
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
index 6dfd0effa5a..8ba3ef4ecfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
@@ -55,7 +55,7 @@ public class TestSnapshotDiffReport {
   protected MiniDFSCluster cluster;
   protected DistributedFileSystem hdfs;
-  private HashMap snapshotNumberMap = new HashMap();
+  private final HashMap snapshotNumberMap = new HashMap();
   @Before
   public void setUp() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 817a2de882d..7c1d6352eaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -44,7 +44,7 @@ public class TestSnapshotFileLength {
   private static final short REPLICATION = 1;
   private static final int BLOCKSIZE = 1024;
-  private static Configuration conf = new Configuration();
+  private static final Configuration conf = new Configuration();
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
index 5e9fe297e93..c7466636593 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
@@ -33,7 +33,7 @@ public class TestSnapshotNameWithInvalidCharacters {
   private static final short REPLICATION = 1;
   private static final int BLOCKSIZE = 1024;
-  private static Configuration conf = new Configuration();
+  private static final Configuration conf = new Configuration();
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index c9e4665203c..fdc8304b8ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -55,8 +55,8 @@ public class TestDFSHAAdmin {
   private static final Log LOG = LogFactory.getLog(TestDFSHAAdmin.class);
   private DFSHAAdmin tool;
-  private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
-  private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
   private String errOutput;
   private String output;
   private HAServiceProtocol mockProtocol;
@@ -68,22 +68,22 @@ public class TestDFSHAAdmin {
       new HAServiceStatus(HAServiceState.STANDBY)
       .setReadyToBecomeActive();
-  private ArgumentCaptor reqInfoCaptor =
+  private final ArgumentCaptor reqInfoCaptor =
     ArgumentCaptor.forClass(StateChangeRequestInfo.class);
-  private static String HOST_A = "1.2.3.1";
-  private static String HOST_B = "1.2.3.2";
+  private static final String HOST_A = "1.2.3.1";
+  private static final String HOST_B = "1.2.3.2";
   // Fencer shell commands that always return true and false respectively
   // on Unix.
-  private static String FENCER_TRUE_COMMAND_UNIX = "shell(true)";
-  private static String FENCER_FALSE_COMMAND_UNIX = "shell(false)";
+  private static final String FENCER_TRUE_COMMAND_UNIX = "shell(true)";
+  private static final String FENCER_FALSE_COMMAND_UNIX = "shell(false)";
   // Fencer shell commands that always return true and false respectively
   // on Windows. Lacking POSIX 'true' and 'false' commands we use the DOS
   // commands 'rem' and 'help.exe'.
-  private static String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)";
-  private static String FENCER_FALSE_COMMAND_WINDOWS = "shell(help.exe /? >NUL)";
+  private static final String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)";
+  private static final String FENCER_FALSE_COMMAND_WINDOWS = "shell(help.exe /? >NUL)";
   private HdfsConfiguration getHAConf() {
     HdfsConfiguration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index dc6b3486562..841aa433d5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -59,7 +59,7 @@ public class TestDFSHAAdminMiniCluster {
   private MiniDFSCluster cluster;
   private Configuration conf;
   private DFSHAAdmin tool;
-  private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
   private String errOutput;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 19d98ab6988..64e618ca15d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -45,7 +45,7 @@ public class TestOfflineEditsViewer {
   private static final Log LOG = LogFactory
       .getLog(TestOfflineEditsViewer.class);
-  private static String buildDir = PathUtils
+  private static final String buildDir = PathUtils
       .getTestDirName(TestOfflineEditsViewer.class);
   // to create edits and get edits filename
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
index c7d3b31dacf..3d930f248d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
@@ -35,7 +35,7 @@ import org.junit.Test;
  * on predetermined inputs
  */
 public class TestDelimitedImageVisitor {
-  private static String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class);
+  private static final String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class);
   private static final String delim = "--";
   // Record an element in the visitor and build the expected line in the output
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
index 2a9465aa90c..fa9131105b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -41,9 +41,9 @@ public class TestAtomicFileOutputStream {
   private static final String TEST_STRING = "hello world";
   private static final String TEST_STRING_2 = "goodbye world";
-  private static File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
+  private static final File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
-  private static File DST_FILE = new File(TEST_DIR, "test.txt");
+  private static final File DST_FILE = new File(TEST_DIR, "test.txt");
   @Before
   public void cleanupTestDir() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
index 8d9a3f9bbf8..31a18fb8815 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
@@ -29,7 +29,7 @@ import org.junit.Test;
 import com.google.common.collect.Lists;
 public class TestDirectBufferPool {
-  DirectBufferPool pool = new DirectBufferPool();
+  final DirectBufferPool pool = new DirectBufferPool();
   @Test
   public void testBasics() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
index d8d9c7379ed..3c9f340b038 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
@@ -40,7 +40,7 @@ public class TestLightWeightHashSet{
   private static final Log LOG = LogFactory
      .getLog("org.apache.hadoop.hdfs.TestLightWeightHashSet");
-  private ArrayList list = new ArrayList();
+  private final ArrayList list = new ArrayList();
   private final int NUM = 100;
   private LightWeightHashSet set;
   private Random rand;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
index 1ccbccf53f5..a5106e4f97a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
@@ -37,7 +37,7 @@ public class TestLightWeightLinkedSet {
   private static final Log LOG = LogFactory
      .getLog("org.apache.hadoop.hdfs.TestLightWeightLinkedSet");
-  private ArrayList list = new ArrayList();
+  private final ArrayList list = new ArrayList();
   private final int NUM = 100;
   private LightWeightLinkedSet set;
   private Random rand;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
index 2afcfd70226..ce63ee60750 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
@@ -64,7 +64,7 @@ public class TestHftpFileSystem {
   private FileSystem hdfs = null;
   private HftpFileSystem hftpFs = null;
-  private static Path[] TEST_PATHS = new Path[] {
+  private static final Path[] TEST_PATHS = new Path[] {
       // URI does not encode, Request#getPathInfo returns /foo
       new Path("/foo;bar"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
index 0d836f9b1f9..36fd821c46f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
@@ -66,7 +66,7 @@ public class TestTokenAspect {
   private static final Text TOKEN_KIND = new Text("DummyFS Token");
   private boolean emulateSecurityEnabled;
   private TokenAspect tokenAspect;
-  private UserGroupInformation ugi = UserGroupInformation
+  private final UserGroupInformation ugi = UserGroupInformation
       .createUserForTesting("foo", new String[] { "bar" });
   private URI uri;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 63786930aec..13a5a534775 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -68,7 +68,7 @@ public class TestWebHdfsTimeouts {
   private InetSocketAddress nnHttpAddress;
   private ServerSocket serverSocket;
   private Thread serverThread;
-  private URLConnectionFactory connectionFactory = new URLConnectionFactory(new ConnectionConfigurator() {
+  private final URLConnectionFactory connectionFactory = new URLConnectionFactory(new ConnectionConfigurator() {
     @Override
     public HttpURLConnection configure(HttpURLConnection conn) throws IOException {
       conn.setReadTimeout(SHORT_SOCKET_TIMEOUT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index 718e01873a8..69ea582bfa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -50,7 +50,7 @@ import org.junit.Test;
 public class TestRefreshUserMappings {
   private MiniDFSCluster cluster;
   Configuration config;
-  private static long groupRefreshTimeoutSec = 1;
+  private static final long groupRefreshTimeoutSec = 1;
   private String tempResource = null;
   public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
index 382713be573..2d96e84ec96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.util.ProgramDriver;
  */
 public class HdfsTestDriver {
-  private ProgramDriver pgd;
+  private final ProgramDriver pgd;
   public HdfsTestDriver() {
     this(new ProgramDriver());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
index 63043494eec..3e4b1eddf97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
@@ -48,7 +48,7 @@ public class TestDelegationTokenFetcher {
   private Configuration conf;
   private URI uri;
   private static final String SERVICE_VALUE = "localhost:2005";
-  private static String tokenFile = "file.dta";
+  private static final String tokenFile = "file.dta";
   @Before
   public void init() throws URISyntaxException, IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
index e424681177a..cf0a489de76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
@@ -294,7 +294,7 @@ public class TestDelegationTokenRemoteFetcher {
     private final Token token;
     private final String serviceUrl;
-    private ImmutableMap routes = ImmutableMap.of(
+    private final ImmutableMap routes = ImmutableMap.of(
         "/exception", new ExceptionHandler(),
         "/cancelDelegationToken", new CancelHandler(),
         "/getDelegationToken", new FetchHandler() ,