Merging r1543710 through r1543901 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1543902 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Arpit Agarwal 2013-11-20 18:37:22 +00:00
commit 75747754d2
20 changed files with 480 additions and 380 deletions

View File: CHANGES.txt (hadoop-hdfs)

@ -202,6 +202,8 @@ Trunk (Unreleased)
HDFS-5511. improve CacheManipulator interface to allow better unit testing
(cmccabe)
HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@ -689,6 +691,12 @@ Release 2.2.1 - UNRELEASED
HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread
holds the write lock (Vinayakumar B via umamahesh)
HDFS-4516. Client crash after block allocation and NN switch before lease recovery for
the same file can cause readers to fail forever (Vinayakumar B via umamahesh)
HDFS-5014. Process register commands without holding BPOfferService lock.
(Vinayakumar B via umamahesh)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES

View File: pom.xml (hadoop-hdfs)

@ -551,9 +551,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
<exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
<exclude>src/main/webapps/static/jquery-1.10.2.min.js</exclude>
<exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
<exclude>src/main/webapps/hdfs/explorer-block-info.dust.html</exclude>
<exclude>src/main/webapps/hdfs/explorer.dust.html</exclude>
</excludes>
</configuration>
</plugin>

View File: DFSClient.java

@ -2393,6 +2393,11 @@ public class DFSClient implements java.io.Closeable {
}
}
@VisibleForTesting
ExtendedBlock getPreviousBlock(String file) {
return filesBeingWritten.get(file).getBlock();
}
/**
* enable/disable restore failed storage.
*
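A note on the new test hook above: it assumes the path is present in filesBeingWritten (the client's map from open source paths to their output streams), so calling it for a file that is not open for write would NPE. A hedged, null-tolerant variant for illustration only, in the same class context:

    // Sketch only: a null-safe form of the accessor above. Returns null
    // when no stream is currently open for the given path.
    @VisibleForTesting
    ExtendedBlock getPreviousBlock(String file) {
      DFSOutputStream out = filesBeingWritten.get(file);
      return out == null ? null : out.getBlock();
    }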

View File: DFSInputStream.java

@ -290,6 +290,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
if (last != null) {
if (last.getLocations().length == 0) {
if (last.getBlockSize() == 0) {
// If the block length is zero, no data has been written to the
// datanodes yet, so there is no need to wait for locations.
return 0;
}
return -1;
}
final long len = readBlockLength(last);
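The new branch distinguishes "no locations because nothing was ever written" from "no locations but bytes recorded". A self-contained sketch of the rule, with a hypothetical resolve helper standing in for the surrounding getFileLength logic:

    // Length resolution for the last, possibly under-construction block.
    // UNKNOWN (-1) tells the caller to retry or fail; 0 means the block
    // is provably empty and the file is readable as-is.
    final class LastBlockLength {
      static final long UNKNOWN = -1;

      static long resolve(long recordedBytes, int locationCount) {
        if (locationCount == 0) {
          // No datanode holds a replica. If no bytes were ever recorded,
          // the client crashed before writing: treat the block as empty.
          return recordedBytes == 0 ? 0 : UNKNOWN;
        }
        // Replicas exist; the real code asks a datanode for the
        // authoritative on-disk length via readBlockLength(last).
        return recordedBytes;
      }
    }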

View File: DFSOutputStream.java

@ -1727,7 +1727,8 @@ public class DFSOutputStream extends FSOutputSummer
waitForAckedSeqno(toWaitFor);
if (updateLength) {
// Update the block length the first time, irrespective of the flag
if (updateLength || persistBlocks.get()) {
synchronized (this) {
if (streamer != null && streamer.block != null) {
lastBlockLength = streamer.block.getNumBytes();
@ -1995,4 +1996,14 @@ public class DFSOutputStream extends FSOutputSummer
public void setDropBehind(Boolean dropBehind) throws IOException {
this.cachingStrategy.setDropBehind(dropBehind);
}
@VisibleForTesting
ExtendedBlock getBlock() {
return streamer.getBlock();
}
@VisibleForTesting
long getFileId() {
return fileId;
}
}
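With this change the flushed block length is recorded whenever either the caller passed UPDATE_LENGTH or dfs.persist.blocks is enabled, rather than only in the former case. A hedged usage sketch of the explicit variant (assuming a DistributedFileSystem and the stock 2.x client API):

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    // Write some bytes, then ask the NameNode to persist the new length.
    static void hsyncWithLength(FileSystem fs, Path p, byte[] data)
        throws IOException {
      // On DFS, create() hands back an HdfsDataOutputStream.
      HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(p);
      out.write(data);
      out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      out.close();
    }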

View File: BPOfferService.java

@ -74,7 +74,7 @@ class BPOfferService {
* This is assigned after the second phase of the
* handshake.
*/
DatanodeRegistration bpRegistration;
volatile DatanodeRegistration bpRegistration;
private final DataNode dn;
@ -298,7 +298,7 @@ class BPOfferService {
* NN, it calls this function to verify that the NN it connected to
* is consistent with other NNs serving the block-pool.
*/
void registrationSucceeded(BPServiceActor bpServiceActor,
synchronized void registrationSucceeded(BPServiceActor bpServiceActor,
DatanodeRegistration reg) throws IOException {
if (bpRegistration != null) {
checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
@ -501,17 +501,37 @@ class BPOfferService {
}
}
synchronized boolean processCommandFromActor(DatanodeCommand cmd,
boolean processCommandFromActor(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
assert bpServices.contains(actor);
if (actor == bpServiceToActive) {
return processCommandFromActive(cmd, actor);
} else {
return processCommandFromStandby(cmd, actor);
if (cmd == null) {
return true;
}
/*
* Datanode registration can be done asynchronously here; there is no
* need to hold the lock. For more information, see HDFS-5014.
*/
if (DatanodeProtocol.DNA_REGISTER == cmd.getAction()) {
// The namenode requested a registration, either at startup or because
// it lost contact with us. Logging the HA state the NN claims is fine
// here; verifying the actor's own state would require taking the lock.
LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ " with " + actor.state + " state");
actor.reRegister();
return true;
}
synchronized (this) {
if (actor == bpServiceToActive) {
return processCommandFromActive(cmd, actor);
} else {
return processCommandFromStandby(cmd, actor);
}
}
}
/**
* Handles all commands from the active namenode except DNA_REGISTER,
* which is handled earlier, before the lock is taken.
*
* @param cmd the command to process
* @return true if further processing may be required, false otherwise.
@ -519,8 +539,6 @@ class BPOfferService {
*/
private boolean processCommandFromActive(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
if (cmd == null)
return true;
final BlockCommand bcmd =
cmd instanceof BlockCommand? (BlockCommand)cmd: null;
final BlockIdCommand blockIdCmd =
@ -564,11 +582,6 @@ class BPOfferService {
// TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command
// See HDFS-2987.
throw new UnsupportedOperationException("Received unimplemented DNA_SHUTDOWN");
case DatanodeProtocol.DNA_REGISTER:
// namenode requested a registration - at start or if NN lost contact
LOG.info("DatanodeCommand action: DNA_REGISTER");
actor.reRegister();
break;
case DatanodeProtocol.DNA_FINALIZE:
String bp = ((FinalizeCommand) cmd).getBlockPoolId();
assert getBlockPoolId().equals(bp) :
@ -608,16 +621,13 @@ class BPOfferService {
return true;
}
/**
* Handles commands from the standby namenode except DNA_REGISTER,
* which is handled earlier, before the lock is taken.
*/
private boolean processCommandFromStandby(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
if (cmd == null)
return true;
switch(cmd.getAction()) {
case DatanodeProtocol.DNA_REGISTER:
// namenode requested a registration - at start or if NN lost contact
LOG.info("DatanodeCommand action from standby: DNA_REGISTER");
actor.reRegister();
break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
if (dn.isBlockTokenEnabled) {
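The restructuring above is the substance of HDFS-5014: reRegister() performs a blocking RPC to the NameNode, so running it under the BPOfferService lock could stall heartbeat processing for the other actor. A minimal, self-contained sketch (hypothetical names) of the resulting locking pattern:

    // Triage the one command that blocks on an RPC *before* taking the
    // shared lock; synchronize only the state-dependent dispatch.
    final class CommandDispatcher {
      interface Command { int action(); }
      static final int DNA_REGISTER = 9; // illustrative constant

      private final Object lock = new Object();

      boolean process(Command cmd, Runnable reRegister, boolean fromActive) {
        if (cmd == null) {
          return true;
        }
        if (cmd.action() == DNA_REGISTER) {
          // Blocking re-registration runs lock-free so heartbeats from
          // the other BPServiceActor are not held up (HDFS-5014).
          reRegister.run();
          return true;
        }
        synchronized (lock) {
          // Active/standby dispatch reads state that heartbeat handling
          // mutates, so it still needs mutual exclusion.
          return fromActive ? handleActive(cmd) : handleStandby(cmd);
        }
      }

      private boolean handleActive(Command cmd)  { return true; }
      private boolean handleStandby(Command cmd) { return true; }
    }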

View File: BPServiceActor.java

@ -28,6 +28,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@ -72,6 +73,7 @@ class BPServiceActor implements Runnable {
static final Log LOG = DataNode.LOG;
final InetSocketAddress nnAddr;
HAServiceState state;
BPOfferService bpos;
@ -640,6 +642,7 @@ class BPServiceActor implements Runnable {
// that we should actually process.
bpos.updateActorStatesFromHeartbeat(
this, resp.getNameNodeHaState());
state = resp.getNameNodeHaState().getState();
long startProcessCommands = now();
if (!processCommand(resp.getCommands()))
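The actor now caches the HA state the NameNode reported in its latest heartbeat, which is what the lock-free DNA_REGISTER log line in BPOfferService reads. A minimal sketch (hypothetical names); note the real field is a plain member, a benign race the HDFS-5014 comment explicitly tolerates:

    // Record the NameNode's claimed HA state on every heartbeat so later
    // log lines (e.g. for DNA_REGISTER) can report it without locking.
    class HeartbeatStateTracker {
      enum HAServiceState { INITIALIZING, ACTIVE, STANDBY }

      // Written only by the actor thread and read solely for logging, so
      // a stale read is harmless (the real field is not even volatile).
      private volatile HAServiceState state = HAServiceState.INITIALIZING;

      void onHeartbeatResponse(HAServiceState claimed) {
        state = claimed;
      }

      HAServiceState lastKnownState() {
        return state;
      }
    }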

View File: FSNamesystem.java

@ -3711,6 +3711,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (uc.getNumExpectedLocations() == 0) {
uc.setExpectedLocations(blockManager.getStorages(lastBlock));
}
if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
// No datanode has reported holding this block; the client may have
// crashed before writing any data to the pipeline. This block doesn't
// need recovery, so remove it and close the file.
pendingFile.removeLastBlock(lastBlock);
finalizeINodeFileUnderConstruction(src, pendingFile,
iip.getLatestSnapshot());
NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
+ "Removed empty last block and closed file.");
return true;
}
// start recovery of the last block for this file
long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));
lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
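This is the NameNode half of the HDFS-4516 fix: a last block with no expected locations and zero bytes was allocated but never reached a pipeline, so waiting for a replica to recover it would block readers forever. A self-contained sketch of the decision (hypothetical types; the real code operates on the under-construction block and the pending INode file):

    // Lease-recovery triage for a file's last, under-construction block.
    final class LastBlockRecovery {
      enum Action { DROP_BLOCK_AND_CLOSE, START_BLOCK_RECOVERY }

      static Action decide(int expectedLocations, long numBytes) {
        if (expectedLocations == 0 && numBytes == 0) {
          // Allocated at the NameNode but never written on any datanode:
          // nothing to recover, so drop the block and finalize the file.
          return Action.DROP_BLOCK_AND_CLOSE;
        }
        // Otherwise bump the generation stamp and recover the pipeline.
        return Action.START_BLOCK_RECOVERY;
      }
    }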

View File: dfs-dust.js

@ -59,32 +59,6 @@
};
$.extend(dust.filters, filters);
/**
* Load templates from external sources in sequential order, and
* compile them. The loading order matters for resolving dependencies.
*
* The templates are compiled on the client side; they should be
* precompiled once the build system has the infrastructure for it.
*
* templates is an array of tuples in the format of {url, name}.
*/
function load_templates(dust, templates, success_cb, error_cb) {
if (templates.length === 0) {
success_cb();
return;
}
var t = templates.shift();
$.get(t.url, function (tmpl) {
var c = dust.compile(tmpl, t.name);
dust.loadSource(c);
load_templates(dust, templates, success_cb, error_cb);
}).error(function (jqxhr, text, err) {
error_cb(t.url, jqxhr, text, err);
});
}
/**
* Load a sequence of JSON.
*
@ -110,7 +84,6 @@
});
}
exports.load_templates = load_templates;
exports.load_json = load_json;
}($, dust, window));

View File: dfshealth.dust.html

@ -1,265 +0,0 @@
<div class="page-header">
{#nnstat}
<h1>NameNode '{HostAndPort}' ({State})</h1>
{/nnstat}
</div>
{#nn}
{@if cond="{DistinctVersionCount} > 1"}
<div class="alert alert-dismissable alert-success">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button>
There are {DistinctVersionCount} versions of datanodes currently live:
{#DistinctVersions}
{key} ({value}) {@sep},{/sep}
{/DistinctVersions}
</div>
{/if}
{@if cond="{NumberOfMissingBlocks} > 0"}
<div class="alert alert-dismissable alert-warning">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button>
<p>There are {NumberOfMissingBlocks} missing blocks. The following files may be corrupted:</p>
<br/>
<div class="well">
{#CorruptFiles}
{.}<br/>
{/CorruptFiles}
</div>
<p>Please check the logs or run fsck in order to identify the missing blocks. See the Hadoop FAQ for common causes and potential solutions.</p>
</div>
{/if}
{/nn}
<div class="panel panel-primary">
<div class="panel-heading">Overview</div>
<div class="panel-body">
{#nn}
<table class="table table-bordered">
<tr><th>Started:</th><td>{NNStarted}</td></tr>
<tr><th>Version:</th><td>{Version}</td></tr>
<tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
<tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
<tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
</table>
{/nn}
</div>
</div>
<a href="explorer.html">Browse the filesystem</a> <a href="/logs/">NameNode Logs</a>
<hr/>
<div class="panel panel-primary">
<div class="panel-heading">Cluster Summary</div>
<div class="panel-body">
<p>
Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.</p>
<p>{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}</p>
<p>
{#fs}
{TotalLoad} files and directories, {BlocksTotal} blocks = {FilesTotal} total filesystem object(s).
{#helper_fs_max_objects/}
{/fs}
</p>
{#mem.HeapMemoryUsage}
<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {max|fmt_bytes}. </p>
{/mem.HeapMemoryUsage}
{#mem.NonHeapMemoryUsage}
<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Committed Non Heap Memory. Max Non Heap Memory is {max|fmt_bytes}. </p>
{/mem.NonHeapMemoryUsage}
{#nn}
<table class="table table-bordered table-striped">
<tr><th> Configured Capacity:</th><td>{Total|fmt_bytes}</td></tr>
<tr><th> DFS Used:</th><td>{Used|fmt_bytes}</td></tr>
<tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
<tr><th> DFS Remaining:</th><td>{Free|fmt_bytes}</td></tr>
<tr><th> DFS Used%:</th><td>{PercentUsed|fmt_percentage}</td></tr>
<tr><th> DFS Remaining%:</th><td>{PercentRemaining|fmt_percentage}</td></tr>
<tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes}</td></tr>
<tr><th> Block Pool Used%:</th><td>{PercentBlockPoolUsed|fmt_percentage}</td></tr>
<tr><th> DataNodes usages% (Min/Median/Max/stdDev): </th>
<td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td></tr>
{/nn}
{#fs}
<tr><th><a href="#nodelist-operation">Live Nodes</a></th><td>{NumLiveDataNodes} (Decommissioned: {NumDecomLiveDataNodes})</td></tr>
<tr><th><a href="#nodelist-operation">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
<tr><th><a href="#nodelist-decom">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
{/fs}
</table>
</div>
</div>
<hr/>
<div class="panel panel-primary">
<div class="panel-heading">NameNode Journal Status</div>
<div class="panel-body">
<p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
<table class="table" title="NameNode Journals">
<thead>
<tr><th>Journal Manager</th><th>State</th></tr>
</thead>
<tbody>
{#nn.NameJournalStatus}
<tr><td>{manager}</td><td>{stream}</td></tr>
{/nn.NameJournalStatus}
</tbody>
</table>
</div>
</div>
<hr/>
<div class="panel panel-primary">
<div class="panel-heading">NameNode Storage</div>
<div class="panel-body">
<table class="table" title="NameNode Storage">
<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>
{#nn.NameDirStatuses}
{#active}{#helper_dir_status type="Active"/}{/active}
{#failed}{#helper_dir_status type="Failed"/}{/failed}
{/nn.NameDirStatuses}
</table>
</div>
</div>
<hr/>
<div class="panel panel-primary">
<div class="panel-heading">Snapshot Summary</div>
<div class="panel-body">
{#fs.SnapshotStats}
<table class="table" title="Snapshot Summary">
<thead><tr><td><b>Snapshottable directories</b></td>
<td><b>Snapshotted directories</b></td></tr>
</thead>
<tbody>
<tr>
<td>{SnapshottableDirectories}</td>
<td>{Snapshots}</td>
</tr>
</tbody>
</table>
{/fs.SnapshotStats}
</div>
</div>
<hr/>
{#startup}
<div class="panel panel-primary">
<div class="panel-heading">Startup Progress</div>
<div class="panel-body">
<p>Elapsed Time: {elapsedTime|fmt_time}, Percent Complete: {percentComplete|fmt_percentage}</p>
<table class="table">
<thead>
<tr>
<th>Phase</th>
<th>Completion</th>
<th>Elapsed Time</th>
</tr>
</thead>
<tbody>
{#phases}
<tr class="phase">
<td class="startupdesc">{desc} {file} {size|fmt_bytes}</td>
<td>{percentComplete|fmt_percentage}</td>
<td>{elapsedTime|fmt_time}</td>
</tr>
{#steps root_file=file}
<tr class="step">
<td class="startupdesc">{stepDesc} {stepFile} {stepSize|fmt_bytes} ({count}/{total})</td>
<td>{percentComplete|fmt_percentage}</td>
<td></td>
</tr>
{/steps}
{/phases}
</table>
</div>
</div>
{/startup}
<hr/>
<div class="panel panel-primary">
<div class="panel-heading">Datanode Information</div>
<div class="panel-body">
<div class="panel panel-default" id="nodelist-operation">
<div class="panel-heading">Nodes in operation</div>
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th>Admin State</th>
<th>Capacity</th>
<th>Used</th>
<th>Non DFS Used</th>
<th>Remaining</th>
<th>Blocks</th>
<th>Block pool used</th>
<th>Failed Volumes</th>
</tr>
</thead>
{#nn.LiveNodes}
<tr>
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>{adminState}</td>
<td>{capacity|fmt_bytes}</td>
<td>{used|fmt_bytes}</td>
<td>{nonDfsUsedSpace|fmt_bytes}</td>
<td>{remaining|fmt_bytes}</td>
<td>{numBlocks}</td>
<td>{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
<td>{volfails}</td>
</tr>
{/nn.LiveNodes}
{#nn.DeadNodes}
<tr class="danger">
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>Dead{?decomissioned}, Decommissioned{/decomissioned}</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
</tr>
{/nn.DeadNodes}
</table>
</div>
</div>
<div class="panel panel-default" id="nodelist-decom">
<div class="panel-heading">Nodes being decomissioned</div>
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th>Under replicated blocks</th>
<th>Blocks with no live replicas</th>
<th>Under Replicated Blocks <br/>In files under construction</th>
</tr>
</thead>
{#nn.DecomNodes}
<tr>
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>{underReplicatedBlocks}</td>
<td>{decommissionOnlyReplicas}</td>
<td>{underReplicateInOpenFiles}</td>
</tr>
{/nn.DecomNodes}
</table>
</div>
</div>
</div>
</div>

View File: dfshealth.html

@ -36,6 +36,275 @@
<div class="col-xs-1 pull-right"><a style="color: #ddd" href="dfshealth.jsp">Legacy UI</a></div>
</div>
<script type="text/x-dust-template" id="tmpl-dfshealth">
<div class="page-header">
{#nnstat}
<h1>NameNode '{HostAndPort}' ({State})</h1>
{/nnstat}
</div>
{#nn}
{@if cond="{DistinctVersionCount} > 1"}
<div class="alert alert-dismissable alert-info">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button>
There are {DistinctVersionCount} versions of datanodes currently live:
{#DistinctVersions}
{key} ({value}) {@sep},{/sep}
{/DistinctVersions}
</div>
{/if}
{@if cond="{NumberOfMissingBlocks} > 0"}
<div class="alert alert-dismissable alert-warning">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button>
<p>There are {NumberOfMissingBlocks} missing blocks. The following files may be corrupted:</p>
<br/>
<div class="well">
{#CorruptFiles}
{.}<br/>
{/CorruptFiles}
</div>
<p>Please check the logs or run fsck in order to identify the missing blocks. See the Hadoop FAQ for common causes and potential solutions.</p>
</div>
{/if}
{/nn}
<div class="panel panel-success">
<div class="panel-heading">Overview</div>
<div class="panel-body">
{#nn}
<table class="table table-bordered">
<tr><th>Started:</th><td>{NNStarted}</td></tr>
<tr><th>Version:</th><td>{Version}</td></tr>
<tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
<tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
<tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
</table>
{/nn}
</div>
</div>
<p><a href="explorer.html">Browse the filesystem</a></p>
<p><a href="/logs/">NameNode Logs</a></p>
<hr/>
<div class="panel panel-success">
<div class="panel-heading">Cluster Summary</div>
<div class="panel-body">
<p>
Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.</p>
<p>{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}</p>
<p>
{#fs}
{TotalLoad} files and directories, {BlocksTotal} blocks = {FilesTotal} total filesystem object(s).
{#helper_fs_max_objects/}
{/fs}
</p>
{#mem.HeapMemoryUsage}
<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {max|fmt_bytes}. </p>
{/mem.HeapMemoryUsage}
{#mem.NonHeapMemoryUsage}
<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Committed Non Heap Memory. Max Non Heap Memory is {max|fmt_bytes}. </p>
{/mem.NonHeapMemoryUsage}
{#nn}
<table class="table table-bordered table-striped">
<tr><th> Configured Capacity:</th><td>{Total|fmt_bytes}</td></tr>
<tr><th> DFS Used:</th><td>{Used|fmt_bytes}</td></tr>
<tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
<tr><th> DFS Remaining:</th><td>{Free|fmt_bytes}</td></tr>
<tr><th> DFS Used%:</th><td>{PercentUsed|fmt_percentage}</td></tr>
<tr><th> DFS Remaining%:</th><td>{PercentRemaining|fmt_percentage}</td></tr>
<tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes}</td></tr>
<tr><th> Block Pool Used%:</th><td>{PercentBlockPoolUsed|fmt_percentage}</td></tr>
<tr><th> DataNodes usages% (Min/Median/Max/stdDev): </th>
<td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td></tr>
{/nn}
{#fs}
<tr><th><a href="#nodelist-operation">Live Nodes</a></th><td>{NumLiveDataNodes} (Decommissioned: {NumDecomLiveDataNodes})</td></tr>
<tr><th><a href="#nodelist-operation">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
<tr><th><a href="#nodelist-decom">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
{/fs}
</table>
</div>
</div>
<hr/>
<div class="panel panel-success">
<div class="panel-heading">NameNode Journal Status</div>
<div class="panel-body">
<p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
<table class="table" title="NameNode Journals">
<thead>
<tr><th>Journal Manager</th><th>State</th></tr>
</thead>
<tbody>
{#nn.NameJournalStatus}
<tr><td>{manager}</td><td>{stream}</td></tr>
{/nn.NameJournalStatus}
</tbody>
</table>
</div>
</div>
<hr/>
<div class="panel panel-success">
<div class="panel-heading">NameNode Storage</div>
<div class="panel-body">
<table class="table" title="NameNode Storage">
<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>
{#nn.NameDirStatuses}
{#active}{#helper_dir_status type="Active"/}{/active}
{#failed}{#helper_dir_status type="Failed"/}{/failed}
{/nn.NameDirStatuses}
</table>
</div>
</div>
<hr/>
<div class="panel panel-success">
<div class="panel-heading">Snapshot Summary</div>
<div class="panel-body">
{#fs.SnapshotStats}
<table class="table" title="Snapshot Summary">
<thead><tr><td><b>Snapshottable directories</b></td>
<td><b>Snapshotted directories</b></td></tr>
</thead>
<tbody>
<tr>
<td>{SnapshottableDirectories}</td>
<td>{Snapshots}</td>
</tr>
</tbody>
</table>
{/fs.SnapshotStats}
</div>
</div>
<hr/>
{#startup}
<div class="panel panel-success">
<div class="panel-heading">Startup Progress</div>
<div class="panel-body">
<p>Elapsed Time: {elapsedTime|fmt_time}, Percent Complete: {percentComplete|fmt_percentage}</p>
<table class="table">
<thead>
<tr>
<th>Phase</th>
<th>Completion</th>
<th>Elapsed Time</th>
</tr>
</thead>
<tbody>
{#phases}
<tr class="phase">
<td class="startupdesc">{desc} {file} {size|fmt_bytes}</td>
<td>{percentComplete|fmt_percentage}</td>
<td>{elapsedTime|fmt_time}</td>
</tr>
{#steps root_file=file}
<tr class="step">
<td class="startupdesc">{stepDesc} {stepFile} {stepSize|fmt_bytes} ({count}/{total})</td>
<td>{percentComplete|fmt_percentage}</td>
<td></td>
</tr>
{/steps}
{/phases}
</table>
</div>
</div>
{/startup}
<hr/>
<div class="panel panel-success">
<div class="panel-heading">Datanode Information</div>
<div class="panel-body">
<div class="panel panel-default" id="nodelist-operation">
<div class="panel-heading">Nodes in operation</div>
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th>Admin State</th>
<th>Capacity</th>
<th>Used</th>
<th>Non DFS Used</th>
<th>Remaining</th>
<th>Blocks</th>
<th>Block pool used</th>
<th>Failed Volumes</th>
</tr>
</thead>
{#nn.LiveNodes}
<tr>
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>{adminState}</td>
<td>{capacity|fmt_bytes}</td>
<td>{used|fmt_bytes}</td>
<td>{nonDfsUsedSpace|fmt_bytes}</td>
<td>{remaining|fmt_bytes}</td>
<td>{numBlocks}</td>
<td>{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
<td>{volfails}</td>
</tr>
{/nn.LiveNodes}
{#nn.DeadNodes}
<tr class="danger">
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>Dead{?decomissioned}, Decommissioned{/decomissioned}</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
</tr>
{/nn.DeadNodes}
</table>
</div>
</div>
<div class="panel panel-default" id="nodelist-decom">
<div class="panel-heading">Nodes being decomissioned</div>
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th>Under replicated blocks</th>
<th>Blocks with no live replicas</th>
<th>Under Replicated Blocks <br/>In files under construction</th>
</tr>
</thead>
{#nn.DecomNodes}
<tr>
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>{underReplicatedBlocks}</td>
<td>{decommissionOnlyReplicas}</td>
<td>{underReplicateInOpenFiles}</td>
</tr>
{/nn.DecomNodes}
</table>
</div>
</div>
</div>
</div>
</script>
<script type="text/javascript" src="/static/jquery-1.10.2.min.js">
</script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
</script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">

View File: dfshealth.js

@ -39,14 +39,9 @@
var base = dust.makeBase(helpers);
var TEMPLATES = [ { 'name': 'dfshealth', 'url': 'dfshealth.dust.html' } ];
load_templates(dust, TEMPLATES, function() {
dust.render('dfshealth', base.push(data), function(err, out) {
$('#panel').html(out);
});
}, function () {
show_err_msg('Failed to load the page.');
dust.loadSource(dust.compile($('#tmpl-dfshealth').html(), 'dfshealth'));
dust.render('dfshealth', base.push(data), function(err, out) {
$('#panel').html(out);
});
}

View File: explorer-block-info.dust.html

@ -1,13 +0,0 @@
{#block}
<p>Block ID: {blockId}</p>
<p>Block Pool ID: {blockPoolId}</p>
<p>Generation Stamp: {generationStamp}</p>
<p>Size: {numBytes}</p>
{/block}
<p>Availability:
<ul>
{#locations}
<li>{hostName}</li>
{/locations}
</ul>
</p>

View File: explorer.dust.html

@ -1,26 +0,0 @@
<table class="table">
<thead>
<tr>
<th>Permission</th>
<th>Owner</th>
<th>Group</th>
<th>Size</th>
<th>Replication</th>
<th>Block Size</th>
<th>Name</th>
</tr>
</thead>
<tbody>
{#FileStatus}
<tr>
<td>{#helper_to_permission/}</td>
<td>{owner}</td>
<td>{group}</td>
<td>{length|fmt_bytes}</td>
<td>{replication}</td>
<td>{blockSize|fmt_bytes}</td>
<td><a style="cursor:pointer" inode-type="{type}" class="explorer-browse-links" inode-path="{pathSuffix}">{pathSuffix}</a></td>
</tr>
{/FileStatus}
</tbody>
</table>

View File: explorer.html

@ -31,9 +31,9 @@
</div>
<div class="modal-body" id="file-info-body">
<a id="file-info-download">Download</a>
<a id="file-info-preview" style="cursor:pointer">Tail the file (last 32K)</a>
<!--<a id="file-info-preview" style="cursor:pointer">Tail the file (last 32K)</a>-->
<hr />
<div class="panel panel-info" id="file-info-blockinfo-panel">
<div class="panel panel-success" id="file-info-blockinfo-panel">
<div class="panel-heading">
Block information --
<select class="btn btn-default" id="file-info-blockinfo-list">
@ -50,7 +50,7 @@
</div>
</div>
</div>
<div class="modal-footer"><button type="button" class="btn btn-primary"
<div class="modal-footer"><button type="button" class="btn btn-success"
data-dismiss="modal">Close</button></div>
</div>
</div>
@ -73,6 +73,51 @@
<br />
<div id="panel"></div>
</div>
<script type="text/x-dust-template" id="tmpl-explorer">
<table class="table">
<thead>
<tr>
<th>Permission</th>
<th>Owner</th>
<th>Group</th>
<th>Size</th>
<th>Replication</th>
<th>Block Size</th>
<th>Name</th>
</tr>
</thead>
<tbody>
{#FileStatus}
<tr>
<td>{#helper_to_permission/}</td>
<td>{owner}</td>
<td>{group}</td>
<td>{length|fmt_bytes}</td>
<td>{replication}</td>
<td>{blockSize|fmt_bytes}</td>
<td><a style="cursor:pointer" inode-type="{type}" class="explorer-browse-links" inode-path="{pathSuffix}">{pathSuffix}</a></td>
</tr>
{/FileStatus}
</tbody>
</table>
</script>
<script type="text/x-dust-template" id="tmpl-block-info">
{#block}
<p>Block ID: {blockId}</p>
<p>Block Pool ID: {blockPoolId}</p>
<p>Generation Stamp: {generationStamp}</p>
<p>Size: {numBytes}</p>
{/block}
<p>Availability:
<ul>
{#locations}
<li>{hostName}</li>
{/locations}
</ul>
</p>
</script>
<script type="text/javascript" src="/static/jquery-1.10.2.min.js">
</script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
</script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">

View File: explorer.js

@ -164,18 +164,12 @@
function init() {
var templates = [
{ 'name': 'explorer', 'url': 'explorer.dust.html'},
{ 'name': 'block-info', 'url': 'explorer-block-info.dust.html'}
];
dust.loadSource(dust.compile($('#tmpl-explorer').html(), 'explorer'));
dust.loadSource(dust.compile($('#tmpl-block-info').html(), 'block-info'));
load_templates(dust, templates, function () {
var b = function() { browse_directory($('#directory').val()); };
$('#btn-nav-directory').click(b);
browse_directory('/');
}, function (url, jqxhr, text, err) {
network_error_handler(url)(jqxhr, text, err);
});
var b = function() { browse_directory($('#directory').val()); };
$('#btn-nav-directory').click(b);
browse_directory('/');
}
init();

View File: hdfs.css

@ -188,3 +188,8 @@ div.security {
#startupprogress span {
font-weight: bold;
}
.panel-success > .panel-heading {
color: #fff !important;
background-color: #5FA33E !important;
}

View File: DFSClientAdapter.java

@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
public class DFSClientAdapter {
@ -43,4 +44,21 @@ public class DFSClientAdapter {
String src, long start, long length) throws IOException {
return DFSClient.callGetBlockLocations(namenode, src, start, length);
}
public static ClientProtocol getNamenode(DFSClient client) throws IOException {
return client.namenode;
}
public static DFSClient getClient(DistributedFileSystem dfs)
throws IOException {
return dfs.dfs;
}
public static ExtendedBlock getPreviousBlock(DFSClient client, String file) {
return client.getPreviousBlock(file);
}
public static long getFileId(DFSOutputStream out) {
return out.getFileId();
}
}

View File: TestPersistBlocks.java

@ -193,7 +193,7 @@ public class TestPersistBlocks {
// This would mean that blocks were successfully persisted to the log
FileStatus status = fs.getFileStatus(FILE_PATH);
assertTrue("Length incorrect: " + status.getLen(),
status.getLen() != len - BLOCK_SIZE);
status.getLen() == len - BLOCK_SIZE);
// Verify the data showed up from before restart, sans abandoned block.
FSDataInputStream readStream = fs.open(FILE_PATH);

View File: TestHASafeMode.java

@ -34,16 +34,23 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
@ -766,4 +773,50 @@ public class TestHASafeMode {
assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
cluster.getConfiguration(0).set(
DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
String testData = "testData";
// Make sure we write a full block before creating the dummy block at the NN.
cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
testData.length());
cluster.restartNameNode(0);
try {
cluster.waitActive();
cluster.transitionToActive(0);
cluster.transitionToStandby(1);
DistributedFileSystem dfs = cluster.getFileSystem(0);
String pathString = "/tmp1.txt";
Path filePath = new Path(pathString);
FSDataOutputStream create = dfs.create(filePath,
FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
null);
create.write(testData.getBytes());
create.hflush();
DFSClient client = DFSClientAdapter.getClient(dfs);
// Add one dummy block at the NN, but do not write it to any DataNode.
ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client,
pathString);
DFSClientAdapter.getNamenode(client).addBlock(
pathString,
client.getClientName(),
new ExtendedBlock(previousBlock),
new DatanodeInfo[0],
DFSClientAdapter.getFileId((DFSOutputStream) create
.getWrappedStream()), null);
cluster.restartNameNode(0, true);
cluster.restartDataNode(0);
cluster.transitionToActive(0);
// let the block reports be processed.
Thread.sleep(2000);
FSDataInputStream is = dfs.open(filePath);
is.close();
dfs.recoverLease(filePath); // initiate lease recovery
assertTrue("Recovery should eventually succeed", dfs.recoverLease(filePath));
} finally {
cluster.shutdown();
}
}
}
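The fixed Thread.sleep(2000) above, which waits for block reports, can be flaky on slow machines. A hedged alternative, assuming GenericTestUtils.waitFor(Supplier<Boolean>, int, int) from org.apache.hadoop.test and Guava's Supplier are available on this branch:

    // Poll until the file becomes readable instead of sleeping a fixed
    // two seconds; dfs and filePath must be (effectively) final here.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          dfs.open(filePath).close(); // succeeds once reports are in
          return true;
        } catch (IOException e) {
          return false;
        }
      }
    }, 200, 30000); // check every 200 ms, time out after 30 s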