From 7be4e5bd222c6f1c40f88ee8b24b1587e157a87e Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Fri, 2 Mar 2012 01:32:49 +0000
Subject: [PATCH] HDFS-3039. Address findbugs and javadoc warnings on branch.
 Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1296017 13f79535-47bb-0310-9956-ffa450edef68
---
 .../dev-support/findbugsExcludeFile.xml       |  6 +++-
 .../hadoop/ha/ActiveStandbyElector.java       |  2 +-
 .../java/org/apache/hadoop/ha/HAAdmin.java    |  3 +-
 .../apache/hadoop/ha/SshFenceByTcpPort.java   |  2 +-
 .../org/apache/hadoop/util/ThreadUtil.java    |  2 +-
 .../hadoop-hdfs/CHANGES.HDFS-1623.txt         |  2 ++
 .../dev-support/findbugsExcludeFile.xml       |  8 +++++
 .../java/org/apache/hadoop/hdfs/HAUtil.java   |  3 +-
 .../apache/hadoop/hdfs/NameNodeProxies.java   |  9 ++---
 .../server/blockmanagement/BlockManager.java  |  2 +-
 .../hdfs/server/datanode/BPOfferService.java  |  4 +--
 .../hdfs/server/namenode/FSEditLog.java       | 35 +++++++++----------
 .../hdfs/server/namenode/FSNamesystem.java    |  4 +--
 .../server/namenode/FileJournalManager.java   |  8 +++--
 .../server/namenode/NamenodeJspHelper.java    |  4 +--
 15 files changed, 55 insertions(+), 39 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 3624c99871f..855b0284537 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -278,8 +278,12 @@
-
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index e91c4ce9926..7da2d3e1bfd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -234,7 +234,7 @@ public class ActiveStandbyElector implements Watcher, StringCallback,
   /**
    * Exception thrown when there is no active leader
    */
-  public class ActiveNotFoundException extends Exception {
+  public static class ActiveNotFoundException extends Exception {
     private static final long serialVersionUID = 3505396722342846462L;
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index dedbebb58b5..3350692d683 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -262,8 +262,7 @@ public abstract class HAAdmin extends Configured implements Tool {
       return -1;
     }
 
-    int i = 0;
-    String cmd = argv[i++];
+    String cmd = argv[0];
 
     if (!cmd.startsWith("-")) {
       errOut.println("Bad command '" + cmd + "': expected command starting with '-'");
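Note on the ActiveStandbyElector hunk above: findbugs flags a non-static inner class that never touches its enclosing instance (SIC_INNER_SHOULD_BE_STATIC is the likely pattern here), because every instance of such a class carries a hidden reference to the outer object. A minimal standalone sketch of the difference, with illustrative names rather than code from this patch:

    public class Outer {
      // Non-static inner class: each instance holds an implicit
      // reference to the Outer that created it, so a retained
      // exception can pin the whole Outer object in memory.
      public class InnerException extends Exception {
        private static final long serialVersionUID = 1L;
      }

      // Static nested class: no hidden outer reference; this is
      // the shape the patch gives ActiveNotFoundException.
      public static class NestedException extends Exception {
        private static final long serialVersionUID = 1L;
      }
    }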
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 88404b92fd4..cec731cf20b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -76,7 +76,7 @@ public class SshFenceByTcpPort extends Configured
     if (argStr != null) {
       // Use a dummy service when checking the arguments defined
       // in the configuration are parseable.
-      Args args = new Args(new InetSocketAddress("localhost", 8020), argStr);
+      new Args(new InetSocketAddress("localhost", 8020), argStr);
     }
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index 535ac341223..6e4dfafdf73 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -30,7 +30,7 @@ public class ThreadUtil {
   /**
    * Cause the current thread to sleep as close as possible to the provided
    * number of milliseconds. This method will log and ignore any
-   * {@link InterrupedException} encountered.
+   * {@link InterruptedException} encountered.
    *
    * @param millis the number of milliseconds for the current thread to sleep
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index 19551ba4aca..3e59df7433d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -248,3 +248,5 @@ HDFS-3023. Optimize entries in edits log for persistBlocks call. (todd)
 HDFS-2979. Balancer should use logical uri for creating failover proxy with HA enabled. (atm)
 
 HDFS-3035. Fix failure of TestFileAppendRestart due to OP_UPDATE_BLOCKS (todd)
+
+HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 9b5d6df12e4..5590055539f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -247,4 +247,12 @@
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 30792984d17..34e9d2e9dd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -99,7 +99,8 @@ public class HAUtil {
         nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
     if (suffixes == null) {
       String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY +
-          " must be suffixed with" + namenodeId + " for HA configuration.";
+          " must be suffixed with nameservice and namenode ID for HA " +
+          "configuration.";
       throw new HadoopIllegalArgumentException(msg);
     }
 
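Note on the SshFenceByTcpPort hunk above: the Args constructor is invoked purely for its argument-validation side effect, and the unused local it was assigned to is what findbugs reports as a dead store (likely DLS_DEAD_LOCAL_STORE). A runnable sketch of the construct-to-validate idiom, with hypothetical names and a simplified format check:

    import java.net.InetSocketAddress;

    public class FenceArgsCheck {
      /** Hypothetical stand-in for SshFenceByTcpPort.Args. */
      static class Args {
        Args(InetSocketAddress addr, String argStr) {
          if (argStr != null && !argStr.matches("[^:]*(:\\d+)?")) {
            throw new IllegalArgumentException("Bad format: " + argStr);
          }
        }
      }

      static void checkArgs(String argStr) {
        if (argStr != null) {
          // Constructed only so a bad config fails fast; binding the
          // result to a never-read local is what findbugs flagged.
          new Args(new InetSocketAddress("localhost", 8020), argStr);
        }
      }

      public static void main(String[] args) {
        checkArgs("user:2222"); // parses cleanly; bad input would throw
      }
    }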
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index d895734f332..650c313c0ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -63,7 +63,8 @@ public class NameNodeProxies {
   /**
    * Wrapper for a client proxy as well as its associated service ID.
    * This is simply used as a tuple-like return type for
-   * {@link createProxy} and {@link createNonHaProxy}.
+   * {@link NameNodeProxies#createProxy} and
+   * {@link NameNodeProxies#createNonHAProxy}.
    */
   public static class ProxyAndInfo<PROXYTYPE> {
     private final PROXYTYPE proxy;
@@ -125,7 +126,7 @@ public class NameNodeProxies {
 
   /**
    * Creates an explicitly non-HA-enabled proxy object. Most of the time you
-   * don't want to use this, and should instead use {@link createProxy}.
+   * don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
    *
    * @param conf the configuration object
    * @param nnAddr address of the remote NN to connect to
@@ -160,8 +161,8 @@ public class NameNodeProxies {
           conf, ugi);
     } else {
       String message = "Upsupported protocol found when creating the proxy " +
-          "conection to NameNode: " +
-          ((xface != null) ? xface.getClass().getName() : xface);
+          "connection to NameNode: " +
+          ((xface != null) ? xface.getClass().getName() : "null");
       LOG.error(message);
       throw new IllegalStateException(message);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cbae6f2246c..1c9b2aad4f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1918,7 +1918,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     int numCurrentReplica = countLiveNodes(storedBlock);
     if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
         && numCurrentReplica >= minReplication) {
-      storedBlock = completeBlock(storedBlock.getINode(), storedBlock, false);
+      completeBlock(storedBlock.getINode(), storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that.
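Note on the NameNodeProxies javadoc hunks above: a bare {@link createProxy} is parsed by the javadoc tool as a reference to a class named createProxy, which does not exist, so it emits a warning; method references need the #member form, optionally qualified by the declaring class. In miniature, with illustrative names:

    public class LinkExamples {
      /**
       * {@link #refresh()} resolves within this class, and
       * {@link LinkExamples#refresh()} resolves from any class;
       * a bare "link refresh" tag would be reported as an
       * unresolvable class reference.
       */
      public void refresh() {}
    }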
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index aaba4fff2ad..27567b543fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -173,7 +173,7 @@ class BPOfferService {
     }
   }
 
-  NamespaceInfo getNamespaceInfo() {
+  synchronized NamespaceInfo getNamespaceInfo() {
     return bpNSInfo;
   }
 
@@ -366,7 +366,7 @@ class BPOfferService {
     }
   }
 
-  DatanodeRegistration createRegistration() {
+  synchronized DatanodeRegistration createRegistration() {
     Preconditions.checkState(bpNSInfo != null,
         "getRegistration() can only be called after initial handshake");
     return dn.createBPRegistration(bpNSInfo);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 3572226d8c5..7c630d70db6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -188,7 +188,7 @@ public class FSEditLog {
     this.sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
   }
 
-  public void initJournalsForWrite() {
+  public synchronized void initJournalsForWrite() {
     Preconditions.checkState(state == State.UNINITIALIZED ||
         state == State.CLOSED, "Unexpected state: %s", state);
 
@@ -196,7 +196,7 @@ public class FSEditLog {
     state = State.BETWEEN_LOG_SEGMENTS;
   }
 
-  public void initSharedJournalsForRead() {
+  public synchronized void initSharedJournalsForRead() {
     if (state == State.OPEN_FOR_READING) {
       LOG.warn("Initializing shared journals for READ, already open for READ",
           new Exception());
@@ -209,7 +209,7 @@ public class FSEditLog {
     state = State.OPEN_FOR_READING;
   }
 
-  private void initJournals(List<URI> dirs) {
+  private synchronized void initJournals(List<URI> dirs) {
     int minimumRedundantJournals = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,
         DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);
@@ -808,7 +808,7 @@ public class FSEditLog {
   /**
    * Used only by unit tests.
    */
   @VisibleForTesting
-  List<JournalAndStream> getJournals() {
+  synchronized List<JournalAndStream> getJournals() {
     return journalSet.getAllJournalStreams();
   }
@@ -816,7 +816,7 @@ public class FSEditLog {
   /**
    * Used only by tests.
    */
   @VisibleForTesting
-  public JournalSet getJournalSet() {
+  synchronized public JournalSet getJournalSet() {
     return journalSet;
   }
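Note on the synchronized keywords added above: the likely findbugs complaint is inconsistent synchronization (IS2_INCONSISTENT_SYNC), raised when a field such as bpNSInfo or the edit log state is written under the object lock in some methods but read without it in others. A minimal sketch of the warning and the style of fix used in this patch, with hypothetical names:

    public class Registry {
      private String info; // meant to be guarded by "this"

      public synchronized void setInfo(String newInfo) {
        info = newInfo;
      }

      // Racy read: the field is accessed both with and without the
      // lock, which is what findbugs flags across the class.
      public String getInfoRacy() {
        return info;
      }

      // The fix applied throughout this patch: take the same lock
      // on the readers as on the writers.
      public synchronized String getInfo() {
        return info;
      }
    }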
@@ -950,17 +950,14 @@ public class FSEditLog {
   /**
    * Archive any log files that are older than the given txid.
    */
-  public void purgeLogsOlderThan(final long minTxIdToKeep) {
-    synchronized (this) {
-      // synchronized to prevent findbugs warning about inconsistent
-      // synchronization. This will be JIT-ed out if asserts are
-      // off.
-      assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
-        minTxIdToKeep <= curSegmentTxId :
-        "cannot purge logs older than txid " + minTxIdToKeep +
-        " when current segment starts at " + curSegmentTxId;
-    }
+  public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
+    assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
+      minTxIdToKeep <= curSegmentTxId :
+      "cannot purge logs older than txid " + minTxIdToKeep +
+      " when current segment starts at " + curSegmentTxId;
 
+    // This could be improved to not need synchronization. But currently,
+    // journalSet is not threadsafe, so we need to synchronize this method.
     try {
       journalSet.purgeLogsOlderThan(minTxIdToKeep);
     } catch (IOException ex) {
@@ -992,8 +989,8 @@ public class FSEditLog {
 
   // sets the initial capacity of the flush buffer.
-  public void setOutputBufferCapacity(int size) {
-    journalSet.setOutputBufferCapacity(size);
+  synchronized void setOutputBufferCapacity(int size) {
+    journalSet.setOutputBufferCapacity(size);
   }
 
   /**
@@ -1069,7 +1066,7 @@ public class FSEditLog {
   /**
    * Run recovery on all journals to recover any unclosed segments
    */
-  void recoverUnclosedStreams() {
+  synchronized void recoverUnclosedStreams() {
     Preconditions.checkState(
         state == State.BETWEEN_LOG_SEGMENTS,
         "May not recover segments - wrong state: %s", state);
@@ -1092,7 +1089,7 @@ public class FSEditLog {
    * @param toAtLeast the selected streams must contain this transaction
    * @param inProgessOk set to true if in-progress streams are OK
    */
-  public Collection<EditLogInputStream> selectInputStreams(long fromTxId,
+  public synchronized Collection<EditLogInputStream> selectInputStreams(long fromTxId,
       long toAtLeastTxId, boolean inProgressOk) throws IOException {
     List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
     EditLogInputStream stream = journalSet.getInputStream(fromTxId, inProgressOk);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 228a340f0c8..f22f8088251 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -494,7 +494,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       nnResourceChecker = new NameNodeResourceChecker(conf);
       checkAvailableResources();
       assert safeMode != null &&
-        !safeMode.initializedReplQueues;
+        !safeMode.isPopulatingReplQueues();
       setBlockTotal();
       blockManager.activate(conf);
       this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
@@ -3801,7 +3801,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
   }
 
-  private void adjustBlockTotals(int deltaSafe, int deltaTotal) {
+  private synchronized void adjustBlockTotals(int deltaSafe, int deltaTotal) {
     if (!shouldIncrementallyTrackBlocks) {
       return;
     }
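Note on the purgeLogsOlderThan() hunk above: the old code held the lock only around the assert, purely to quiet the checker, while the actual call into journalSet ran unlocked; making the whole method synchronized removes the warning and also serializes access to the non-threadsafe JournalSet, as the new comment says. The before and after shapes, reduced to a sketch with placeholder logic:

    public class PurgeShapes {
      private long curSegmentTxId = 0;

      // Before: the lock covers only the invariant check.
      public void purgeBefore(long minTxIdToKeep) {
        synchronized (this) {
          assert minTxIdToKeep <= curSegmentTxId;
        }
        doPurge(minTxIdToKeep); // runs outside the lock
      }

      // After: one lock covers both the check and the mutation.
      public synchronized void purgeAfter(long minTxIdToKeep) {
        assert minTxIdToKeep <= curSegmentTxId;
        doPurge(minTxIdToKeep);
      }

      private void doPurge(long minTxIdToKeep) { /* placeholder */ }
    }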
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index eaaf65b5fc2..603dd000909 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -310,7 +310,9 @@ class FileJournalManager implements JournalManager {
       // file, but before writing anything to it. Safe to delete it.
       if (elf.getFile().length() == 0) {
         LOG.info("Deleting zero-length edit log file " + elf);
-        elf.getFile().delete();
+        if (!elf.getFile().delete()) {
+          throw new IOException("Unable to delete file " + elf.getFile());
+        }
         continue;
       }
 
@@ -328,7 +330,9 @@ class FileJournalManager implements JournalManager {
       // delete the file.
       if (elf.getNumTransactions() == 0) {
         LOG.info("Deleting edit log file with zero transactions " + elf);
-        elf.getFile().delete();
+        if (!elf.getFile().delete()) {
+          throw new IOException("Unable to delete " + elf.getFile());
+        }
         continue;
       }
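Note on the FileJournalManager hunks above: java.io.File.delete() signals failure through its boolean return rather than an exception, and findbugs reports call sites that ignore it (likely RV_RETURN_VALUE_IGNORED_BAD_PRACTICE); delete() fails silently on permission problems or open handles. The checked-delete idiom the patch adopts, as a standalone sketch:

    import java.io.File;
    import java.io.IOException;

    public class CheckedDelete {
      static void deleteOrThrow(File f) throws IOException {
        // delete() reports failure via its return value, so the
        // result must be checked explicitly.
        if (!f.delete()) {
          throw new IOException("Unable to delete file " + f);
        }
      }

      public static void main(String[] args) throws IOException {
        File tmp = File.createTempFile("edits", ".tmp");
        deleteOrThrow(tmp);
      }
    }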
\n" + rowTxt() + colTxt() + "Configured Capacity" + colTxt() + ":" + colTxt()