diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dd3ee1dd474..eccf341d7f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -202,6 +202,8 @@ Trunk (Unreleased)
HDFS-5511. improve CacheManipulator interface to allow better unit testing
(cmccabe)
+ HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
+
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@@ -689,6 +691,12 @@ Release 2.2.1 - UNRELEASED
HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread
holds the write lock (VinayaKumar B via umamahesh)
+ HDFS-4516. Client crash after block allocation and NN switch before lease recovery for
+ the same file can cause readers to fail forever (VinayaKumar B via umamahesh)
+
+ HDFS-5014. Process register commands without holding BPOfferService lock.
+ (VinayaKumar B via umamahesh)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 3c92933c01b..0b1e55d46c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -551,9 +551,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
<exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
<exclude>src/main/webapps/static/jquery-1.10.2.min.js</exclude>
- <exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
- <exclude>src/main/webapps/hdfs/explorer-block-info.dust.html</exclude>
- <exclude>src/main/webapps/hdfs/explorer.dust.html</exclude>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5a41c4b88a5..86c9fe51858 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2392,6 +2392,11 @@ public class DFSClient implements java.io.Closeable {
throw re.unwrapRemoteException(AccessControlException.class);
}
}
+
+ @VisibleForTesting
+ ExtendedBlock getPreviousBlock(String file) {
+ return filesBeingWritten.get(file).getBlock();
+ }
/**
* enable/disable restore failed storage.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6b506c2b57d..9882d76fda7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -290,6 +290,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
if (last != null) {
if (last.getLocations().length == 0) {
+ if (last.getBlockSize() == 0) {
+ // If the block length is zero, no data has been written to any
+ // datanode yet, so there is no need to wait for locations.
+ return 0;
+ }
return -1;
}
final long len = readBlockLength(last);
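
A minimal, self-contained sketch of the decision this hunk adds, using a hypothetical class and parameter names rather than the real DFSInputStream internals:

// Hypothetical sketch (not part of the patch) of the last-block length
// resolution changed above. In the real DFSInputStream, the -1 result
// leaves the file length unknown and readers keep failing until locations
// appear; returning 0 for an empty, location-less block lets them proceed.
final class LastBlockLengthSketch {
  /**
   * @param locationCount number of datanodes reported for the last block
   * @param blockSize     block size recorded on the namenode
   * @return the visible length, or -1 if it cannot be determined yet
   */
  static long resolve(int locationCount, long blockSize) {
    if (locationCount == 0) {
      if (blockSize == 0) {
        // The writer never pushed any data into the pipeline, so there is
        // nothing to wait for: the visible length is simply zero.
        return 0;
      }
      // Data was written but no replica is reachable; length is unknown.
      return -1;
    }
    // With locations available, the real code asks a datanode for the
    // visible length (readBlockLength above).
    return blockSize;
  }
}
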
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index c8bbf8fd841..28cd596ef92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1726,8 +1726,9 @@ public class DFSOutputStream extends FSOutputSummer
} // end synchronized
waitForAckedSeqno(toWaitFor);
-
- if (updateLength) {
+
+ // update the last block length the first time, irrespective of the updateLength flag
+ if (updateLength || persistBlocks.get()) {
synchronized (this) {
if (streamer != null && streamer.block != null) {
lastBlockLength = streamer.block.getNumBytes();
@@ -1995,4 +1996,14 @@ public class DFSOutputStream extends FSOutputSummer
public void setDropBehind(Boolean dropBehind) throws IOException {
this.cachingStrategy.setDropBehind(dropBehind);
}
+
+ @VisibleForTesting
+ ExtendedBlock getBlock() {
+ return streamer.getBlock();
+ }
+
+ @VisibleForTesting
+ long getFileId() {
+ return fileId;
+ }
}
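
As a usage-level illustration of the flush path this hunk changes, a hedged sketch follows: the file path and default Configuration are assumptions, while FileSystem, HdfsDataOutputStream and SyncFlag.UPDATE_LENGTH are the standard HDFS client APIs:

import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

// Hedged usage sketch, not part of the patch: drives the flushOrSync()
// path that the hunk above modifies.
public class FlushExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Assumes fs is an HDFS DistributedFileSystem, so create() returns an
    // HdfsDataOutputStream; the path is illustrative only.
    try (HdfsDataOutputStream out =
        (HdfsDataOutputStream) fs.create(new Path("/tmp/flush-example"))) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
      // Plain hflush(): updateLength stays false, but with this patch the
      // client still records the last block length on the first flush when
      // the block is being persisted.
      out.hflush();
      // hsync(UPDATE_LENGTH): explicitly asks the namenode to update the
      // visible length of the last block.
      out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
  }
}
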
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 5caf54fc6b4..30892e48c8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -74,7 +74,7 @@ class BPOfferService {
* This is assigned after the second phase of the
* handshake.
*/
- DatanodeRegistration bpRegistration;
+ volatile DatanodeRegistration bpRegistration;
private final DataNode dn;
@@ -298,7 +298,7 @@ class BPOfferService {
* NN, it calls this function to verify that the NN it connected to
* is consistent with other NNs serving the block-pool.
*/
- void registrationSucceeded(BPServiceActor bpServiceActor,
+ synchronized void registrationSucceeded(BPServiceActor bpServiceActor,
DatanodeRegistration reg) throws IOException {
if (bpRegistration != null) {
checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
@@ -501,17 +501,37 @@ class BPOfferService {
}
}
- synchronized boolean processCommandFromActor(DatanodeCommand cmd,
+ boolean processCommandFromActor(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
assert bpServices.contains(actor);
- if (actor == bpServiceToActive) {
- return processCommandFromActive(cmd, actor);
- } else {
- return processCommandFromStandby(cmd, actor);
+ if (cmd == null) {
+ return true;
+ }
+ /*
+ * Datanode registration can be done asynchronously here; there is no need
+ * to hold the lock. For more info, refer to HDFS-5014.
+ */
+ if (DatanodeProtocol.DNA_REGISTER == cmd.getAction()) {
+ // namenode requested a registration - at startup or because the NN lost contact.
+ // Logging the state the actor claims is sufficient here; there is no need
+ // to obtain the lock just to check the actor state.
+ LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ + " with " + actor.state + " state");
+ actor.reRegister();
+ return true;
+ }
+ synchronized (this) {
+ if (actor == bpServiceToActive) {
+ return processCommandFromActive(cmd, actor);
+ } else {
+ return processCommandFromStandby(cmd, actor);
+ }
}
}
/**
+ * This method handles all commands from the active namenode, except
+ * DNA_REGISTER, which is handled earlier in processCommandFromActor().
*
* @param cmd
* @return true if further processing may be required or false otherwise.
@@ -519,8 +539,6 @@ class BPOfferService {
*/
private boolean processCommandFromActive(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
- if (cmd == null)
- return true;
final BlockCommand bcmd =
cmd instanceof BlockCommand? (BlockCommand)cmd: null;
final BlockIdCommand blockIdCmd =
@@ -564,11 +582,6 @@ class BPOfferService {
// TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command
// See HDFS-2987.
throw new UnsupportedOperationException("Received unimplemented DNA_SHUTDOWN");
- case DatanodeProtocol.DNA_REGISTER:
- // namenode requested a registration - at start or if NN lost contact
- LOG.info("DatanodeCommand action: DNA_REGISTER");
- actor.reRegister();
- break;
case DatanodeProtocol.DNA_FINALIZE:
String bp = ((FinalizeCommand) cmd).getBlockPoolId();
assert getBlockPoolId().equals(bp) :
@@ -608,16 +621,13 @@ class BPOfferService {
return true;
}
+ /**
+ * This method handles commands from the standby namenode, except
+ * DNA_REGISTER, which is handled earlier in processCommandFromActor().
+ */
private boolean processCommandFromStandby(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
- if (cmd == null)
- return true;
switch(cmd.getAction()) {
- case DatanodeProtocol.DNA_REGISTER:
- // namenode requested a registration - at start or if NN lost contact
- LOG.info("DatanodeCommand action from standby: DNA_REGISTER");
- actor.reRegister();
- break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
if (dn.isBlockTokenEnabled) {
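
The locking pattern behind this change, as a minimal self-contained sketch (the class name, the Runnable parameters and the constant value are hypothetical, not the real BPOfferService types):

// Hypothetical sketch of the lock narrowing applied above (HDFS-5014):
// DNA_REGISTER is handled without the BPOfferService lock, because
// re-registration is an RPC back to the namenode and may block for a long
// time; only the remaining commands are dispatched under synchronization.
class CommandDispatcherSketch {
  static final int DNA_REGISTER = 1;  // illustrative value only

  boolean processCommand(int action, Runnable reRegister, Runnable dispatchUnderLock) {
    if (action == DNA_REGISTER) {
      // May block on the namenode; never hold the lock across this call.
      reRegister.run();
      return true;
    }
    synchronized (this) {
      // Everything else needs a consistent view of the actor/active state.
      dispatchUnderLock.run();
      return true;
    }
  }
}
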
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index f72ed7a2eb2..7cc8cb1b950 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -28,6 +28,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -72,6 +73,7 @@ class BPServiceActor implements Runnable {
static final Log LOG = DataNode.LOG;
final InetSocketAddress nnAddr;
+ HAServiceState state;
BPOfferService bpos;
@@ -640,6 +642,7 @@ class BPServiceActor implements Runnable {
// that we should actually process.
bpos.updateActorStatesFromHeartbeat(
this, resp.getNameNodeHaState());
+ state = resp.getNameNodeHaState().getState();
long startProcessCommands = now();
if (!processCommand(resp.getCommands()))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d7fef584603..d651413dd47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3711,6 +3711,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (uc.getNumExpectedLocations() == 0) {
uc.setExpectedLocations(blockManager.getStorages(lastBlock));
}
+
+ if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
+ // No datanode has reported in for this block.
+ // The client may have crashed before writing any data to the pipeline.
+ // This block does not need any recovery;
+ // we can remove it and close the file.
+ pendingFile.removeLastBlock(lastBlock);
+ finalizeINodeFileUnderConstruction(src, pendingFile,
+ iip.getLatestSnapshot());
+ NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
+ + "Removed empty last block and closed file.");
+ return true;
+ }
// start recovery of the last block for this file
long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));
lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
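
A simplified, hypothetical sketch of the branch added to internalReleaseLease above; the Runnable parameters stand in for the real calls (removeLastBlock, finalizeINodeFileUnderConstruction, and the generation-stamp bump that starts block recovery):

// Hypothetical sketch, not the real FSNamesystem code: when the last
// under-construction block has no expected locations and zero bytes, the
// writer crashed before any data reached the pipeline, so block recovery
// could never complete. Drop the block and close the file so readers stop
// failing (HDFS-4516).
final class LeaseReleaseSketch {
  static boolean release(int numExpectedLocations, long numBytes,
      Runnable removeEmptyLastBlock, Runnable closeFile, Runnable startBlockRecovery) {
    if (numExpectedLocations == 0 && numBytes == 0) {
      removeEmptyLastBlock.run();
      closeFile.run();
      return true;           // lease released, file closed immediately
    }
    startBlockRecovery.run();
    return false;            // recovery started, file closed later
  }
}
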
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
index b9febf24fae..e2918362db4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
@@ -59,32 +59,6 @@
};
$.extend(dust.filters, filters);
- /**
- * Load templates from external sources in sequential orders, and
- * compile them. The loading order is important to resolve dependency.
- *
- * The code compile the templates on the client sides, which should be
- * precompiled once we introduce the infrastructure in the building
- * system.
- *
- * templates is an array of tuples in the format of {url, name}.
- */
- function load_templates(dust, templates, success_cb, error_cb) {
- if (templates.length === 0) {
- success_cb();
- return;
- }
-
- var t = templates.shift();
- $.get(t.url, function (tmpl) {
- var c = dust.compile(tmpl, t.name);
- dust.loadSource(c);
- load_templates(dust, templates, success_cb, error_cb);
- }).error(function (jqxhr, text, err) {
- error_cb(t.url, jqxhr, text, err);
- });
- }
-
/**
* Load a sequence of JSON.
*
@@ -110,7 +84,6 @@
});
}
- exports.load_templates = load_templates;
exports.load_json = load_json;
}($, dust, window));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
deleted file mode 100644
index e7bb5a2b123..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
+++ /dev/null
@@ -1,265 +0,0 @@
-
-
-
- There are {DistinctVersionCount} versions of datanodes currently live:
- {#DistinctVersions}
- {key} ({value}) {@sep},{/sep}
- {/DistinctVersions}
-