From f26d2adbf98890cfe350c17241f5049b89a11e3c Mon Sep 17 00:00:00 2001
From: Uma Maheswara Rao G
Date: Wed, 20 Nov 2013 14:43:11 +0000
Subject: [PATCH 1/3] HDFS-4516. Client crash after block allocation and NN
 switch before lease recovery for the same file can cause readers to fail
 forever. Contributed by Vinay.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543829 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../org/apache/hadoop/hdfs/DFSClient.java       |  5 ++
 .../apache/hadoop/hdfs/DFSInputStream.java      |  5 ++
 .../apache/hadoop/hdfs/DFSOutputStream.java     | 15 +++++-
 .../hdfs/server/namenode/FSNamesystem.java      | 13 +++++
 .../apache/hadoop/hdfs/DFSClientAdapter.java    | 18 +++++++
 .../apache/hadoop/hdfs/TestPersistBlocks.java   |  2 +-
 .../server/namenode/ha/TestHASafeMode.java      | 53 +++++++++++++++++++
 8 files changed, 111 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dd3ee1dd474..6896714a9f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -689,6 +689,9 @@ Release 2.2.1 - UNRELEASED
     HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread
     holds the write lock (VinayaKumar B via umamahesh)
 
+    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for
+    the same file can cause readers to fail forever (VinayaKumar B via umamahesh)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5a41c4b88a5..86c9fe51858 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2392,6 +2392,11 @@ public class DFSClient implements java.io.Closeable {
       throw re.unwrapRemoteException(AccessControlException.class);
     }
   }
+
+  @VisibleForTesting
+  ExtendedBlock getPreviousBlock(String file) {
+    return filesBeingWritten.get(file).getBlock();
+  }
 
   /**
    * enable/disable restore failed storage.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 49ecb268646..25441ff2b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -290,6 +290,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     final LocatedBlock last = locatedBlocks.getLastLocatedBlock();
     if (last != null) {
       if (last.getLocations().length == 0) {
+        if (last.getBlockSize() == 0) {
+          // If the length is zero, then no data has been written to the
+          // datanode, so there is no need to wait for its locations.
+          return 0;
+        }
         return -1;
       }
       final long len = readBlockLength(last);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 362ffe6f428..0ffb095ec64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1708,8 +1708,9 @@ public class DFSOutputStream extends FSOutputSummer
       } // end synchronized
 
       waitForAckedSeqno(toWaitFor);
-
-      if (updateLength) {
+
+      // update the block length the first time, irrespective of the flag
+      if (updateLength || persistBlocks.get()) {
         synchronized (this) {
           if (streamer != null && streamer.block != null) {
             lastBlockLength = streamer.block.getNumBytes();
@@ -1977,4 +1978,14 @@ public class DFSOutputStream extends FSOutputSummer
   public void setDropBehind(Boolean dropBehind) throws IOException {
     this.cachingStrategy.setDropBehind(dropBehind);
   }
+
+  @VisibleForTesting
+  ExtendedBlock getBlock() {
+    return streamer.getBlock();
+  }
+
+  @VisibleForTesting
+  long getFileId() {
+    return fileId;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 472b41a0066..31304d1e97b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3722,6 +3722,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (uc.getNumExpectedLocations() == 0) {
       uc.setExpectedLocations(blockManager.getNodes(lastBlock));
     }
+
+    if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) {
+      // No datanode has reported this block.
+      // The client may have crashed before writing any data to the pipeline.
+      // This block doesn't need any recovery;
+      // we can remove it and close the file.
+      pendingFile.removeLastBlock(lastBlock);
+      finalizeINodeFileUnderConstruction(src, pendingFile,
+          iip.getLatestSnapshot());
+      NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: "
+          + "Removed empty last block and closed file.");
+      return true;
+    }
     // start recovery of the last block for this file
     long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));
     lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
index 7007f12a18a..5d367576091 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 public class DFSClientAdapter {
@@ -43,4 +44,21 @@ public class DFSClientAdapter {
       String src, long start, long length) throws IOException {
     return DFSClient.callGetBlockLocations(namenode, src, start, length);
   }
+
+  public static ClientProtocol getNamenode(DFSClient client) throws IOException {
+    return client.namenode;
+  }
+
+  public static DFSClient getClient(DistributedFileSystem dfs)
+      throws IOException {
+    return dfs.dfs;
+  }
+
+  public static ExtendedBlock getPreviousBlock(DFSClient client, String file) {
+    return client.getPreviousBlock(file);
+  }
+
+  public static long getFileId(DFSOutputStream out) {
+    return out.getFileId();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index 424cc77a19d..4ee6a283819 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -193,7 +193,7 @@ public class TestPersistBlocks {
     // This would mean that blocks were successfully persisted to the log
     FileStatus status = fs.getFileStatus(FILE_PATH);
     assertTrue("Length incorrect: " + status.getLen(),
-        status.getLen() != len - BLOCK_SIZE);
+        status.getLen() == len - BLOCK_SIZE);
 
     // Verify the data showed up from before restart, sans abandoned block.
     FSDataInputStream readStream = fs.open(FILE_PATH);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index fd548a9422e..0c95764eba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -34,16 +34,23 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
@@ -766,4 +773,50 @@ public class TestHASafeMode {
     assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
   }
 
+  /** Test NN crash and client crash/stuck immediately after block allocation */
+  @Test(timeout = 100000)
+  public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
+    cluster.getConfiguration(0).set(
+        DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
+    String testData = "testData";
+    // to make sure we write a full block before creating the dummy block at the NN.
+    cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
+        testData.length());
+    cluster.restartNameNode(0);
+    try {
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+      cluster.transitionToStandby(1);
+      DistributedFileSystem dfs = cluster.getFileSystem(0);
+      String pathString = "/tmp1.txt";
+      Path filePath = new Path(pathString);
+      FSDataOutputStream create = dfs.create(filePath,
+          FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
+          null);
+      create.write(testData.getBytes());
+      create.hflush();
+      DFSClient client = DFSClientAdapter.getClient(dfs);
+      // add one dummy block at the NN, but do not write it to any DataNode
+      ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client,
+          pathString);
+      DFSClientAdapter.getNamenode(client).addBlock(
+          pathString,
+          client.getClientName(),
+          new ExtendedBlock(previousBlock),
+          new DatanodeInfo[0],
+          DFSClientAdapter.getFileId((DFSOutputStream) create
+              .getWrappedStream()), null);
+      cluster.restartNameNode(0, true);
+      cluster.restartDataNode(0);
+      cluster.transitionToActive(0);
+      // let the block reports be processed.
+      Thread.sleep(2000);
+      FSDataInputStream is = dfs.open(filePath);
+      is.close();
+      dfs.recoverLease(filePath); // initiate recovery
+      assertTrue("Recovery should also succeed", dfs.recoverLease(filePath));
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
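Note (not part of the patch): the DFSInputStream hunk in PATCH 1/3 is the reader-side half of HDFS-4516, and it is easiest to see in isolation. The sketch below is a minimal, hypothetical rendering of that rule; LastBlockView and resolveLastBlockLength are illustrative stand-ins, not actual HDFS types or methods.

    import java.util.List;

    class LastBlockLengthSketch {

      // Hypothetical, simplified view of a file's under-construction last block.
      record LastBlockView(List<String> locations, long blockSize) {}

      // Mirrors the patched decision: a last block with no reported locations
      // AND zero size means the writer crashed right after allocating the
      // block, before any byte reached a datanode, so its length is safely 0.
      // Before the patch this case fell through to -1, and readers of such a
      // file failed forever.
      static long resolveLastBlockLength(LastBlockView last) {
        if (last.locations().isEmpty()) {
          if (last.blockSize() == 0) {
            return 0;  // nothing written yet; the file is readable as-is
          }
          return -1;   // replicas may exist but are unreported; cannot decide
        }
        return last.blockSize(); // HDFS would ask a replica for the visible length
      }
    }

The NameNode-side half (the FSNamesystem hunk) is the complement: during lease recovery it removes such an empty, location-less last block outright and closes the file instead of starting block recovery that can never finish.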
From 04cf2a768c0fb1c2c5c80d2480aa072ec7e43c3f Mon Sep 17 00:00:00 2001
From: Uma Maheswara Rao G
Date: Wed, 20 Nov 2013 16:27:28 +0000
Subject: [PATCH 2/3] HDFS-5014. Process register commands without holding
 BPOfferService lock. Contributed by Vinay.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543861 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/datanode/BPOfferService.java    | 52 +++++++++++--------
 .../hdfs/server/datanode/BPServiceActor.java    |  3 ++
 3 files changed, 37 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6896714a9f8..a352eb124a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -692,6 +692,9 @@ Release 2.2.1 - UNRELEASED
     HDFS-4516. Client crash after block allocation and NN switch before lease recovery for
     the same file can cause readers to fail forever (VinayaKumar B via umamahesh)
 
+    HDFS-5014. Process register commands without holding BPOfferService lock.
+    (VinayaKumar B via umamahesh)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 7fca64b39e6..576917bf560 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -73,7 +73,7 @@ class BPOfferService {
    * This is assigned after the second phase of the
    * handshake.
    */
-  DatanodeRegistration bpRegistration;
+  volatile DatanodeRegistration bpRegistration;
 
   private final DataNode dn;
 
@@ -295,7 +295,7 @@ class BPOfferService {
    * NN, it calls this function to verify that the NN it connected to
    * is consistent with other NNs serving the block-pool.
    */
-  void registrationSucceeded(BPServiceActor bpServiceActor,
+  synchronized void registrationSucceeded(BPServiceActor bpServiceActor,
       DatanodeRegistration reg) throws IOException {
     if (bpRegistration != null) {
       checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
@@ -497,17 +497,37 @@ class BPOfferService {
     }
   }
 
-  synchronized boolean processCommandFromActor(DatanodeCommand cmd,
+  boolean processCommandFromActor(DatanodeCommand cmd,
       BPServiceActor actor) throws IOException {
     assert bpServices.contains(actor);
-    if (actor == bpServiceToActive) {
-      return processCommandFromActive(cmd, actor);
-    } else {
-      return processCommandFromStandby(cmd, actor);
+    if (cmd == null) {
+      return true;
+    }
+    /*
+     * Datanode registration can be done asynchronously here; there is no need
+     * to hold the lock. For more info refer to HDFS-5014.
+     */
+    if (DatanodeProtocol.DNA_REGISTER == cmd.getAction()) {
+      // The namenode requested a registration - at startup or if the NN lost
+      // contact. Just logging the claimed state is OK here instead of
+      // checking the actor state by obtaining the lock.
+      LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+          + " with " + actor.state + " state");
+      actor.reRegister();
+      return true;
+    }
+    synchronized (this) {
+      if (actor == bpServiceToActive) {
+        return processCommandFromActive(cmd, actor);
+      } else {
+        return processCommandFromStandby(cmd, actor);
+      }
     }
   }
 
   /**
+   * This method should handle all commands from the Active namenode except
+   * DNA_REGISTER, which should already have been handled earlier.
    *
    * @param cmd
    * @return true if further processing may be required or false otherwise.
@@ -515,8 +535,6 @@ class BPOfferService {
   private boolean processCommandFromActive(DatanodeCommand cmd,
       BPServiceActor actor) throws IOException {
-    if (cmd == null)
-      return true;
     final BlockCommand bcmd =
       cmd instanceof BlockCommand? (BlockCommand)cmd: null;
     final BlockIdCommand blockIdCmd =
@@ -560,11 +578,6 @@ class BPOfferService {
       // TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command
       // See HDFS-2987.
       throw new UnsupportedOperationException("Received unimplemented DNA_SHUTDOWN");
-    case DatanodeProtocol.DNA_REGISTER:
-      // namenode requested a registration - at start or if NN lost contact
-      LOG.info("DatanodeCommand action: DNA_REGISTER");
-      actor.reRegister();
-      break;
     case DatanodeProtocol.DNA_FINALIZE:
       String bp = ((FinalizeCommand) cmd).getBlockPoolId();
       assert getBlockPoolId().equals(bp) :
@@ -604,16 +617,13 @@ class BPOfferService {
     return true;
   }
 
+  /**
+   * This method should handle commands from the Standby namenode except
+   * DNA_REGISTER, which should already have been handled earlier.
+   */
   private boolean processCommandFromStandby(DatanodeCommand cmd,
       BPServiceActor actor) throws IOException {
-    if (cmd == null)
-      return true;
     switch(cmd.getAction()) {
-    case DatanodeProtocol.DNA_REGISTER:
-      // namenode requested a registration - at start or if NN lost contact
-      LOG.info("DatanodeCommand action from standby: DNA_REGISTER");
-      actor.reRegister();
-      break;
     case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
       LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
       if (dn.isBlockTokenEnabled) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 9d2b36d9823..b854e06b0d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -29,6 +29,7 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -73,6 +74,7 @@ class BPServiceActor implements Runnable {
   static final Log LOG = DataNode.LOG;
 
   final InetSocketAddress nnAddr;
+  HAServiceState state;
 
   BPOfferService bpos;
 
@@ -569,6 +571,7 @@ class BPServiceActor implements Runnable {
           // that we should actually process.
           bpos.updateActorStatesFromHeartbeat(
               this, resp.getNameNodeHaState());
+          state = resp.getNameNodeHaState().getState();
 
           long startProcessCommands = now();
           if (!processCommand(resp.getCommands()))
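Note (not part of the patch): PATCH 2/3 pulls DNA_REGISTER handling in front of the lock because re-registration performs namenode RPCs and can block for a long time, while every other command still needs the lock for state-dependent dispatch. A minimal, hypothetical sketch of that pattern follows; the class and method names are illustrative, not the actual HDFS ones.

    import java.io.IOException;

    class CommandProcessorSketch {

      enum Action { REGISTER, OTHER }

      interface Command {
        Action action();
      }

      // Handle the one potentially slow command (re-registration) without the
      // instance lock, then take the lock only for state-dependent dispatch.
      boolean process(Command cmd) throws IOException {
        if (cmd == null) {
          return true;
        }
        if (cmd.action() == Action.REGISTER) {
          reRegister();          // slow namenode RPC; deliberately runs unlocked
          return true;
        }
        synchronized (this) {    // the active/standby decision needs a stable view
          return dispatchLocked(cmd);
        }
      }

      void reRegister() throws IOException { /* namenode RPC elided */ }

      boolean dispatchLocked(Command cmd) { return true; }
    }

The same reasoning explains the other two changes in the hunk: bpRegistration becomes volatile so the now-unlocked path reads a current value, and registrationSucceeded becomes synchronized so writes to the shared registration state stay mutually exclusive.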
From c131ae39fcc704707e432824b548d1243861c446 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Wed, 20 Nov 2013 18:17:41 +0000
Subject: [PATCH 3/3] HDFS-5525. Inline dust templates for new Web UI.
 Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543895 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt       |   2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml           |   3 -
 .../src/main/webapps/hdfs/dfs-dust.js             |  27 --
 .../src/main/webapps/hdfs/dfshealth.dust.html     | 265 -----------------
 .../src/main/webapps/hdfs/dfshealth.html          | 269 ++++++++++++++++++
 .../src/main/webapps/hdfs/dfshealth.js            |  11 +-
 .../webapps/hdfs/explorer-block-info.dust.html    |  13 -
 .../src/main/webapps/hdfs/explorer.dust.html      |  26 --
 .../src/main/webapps/hdfs/explorer.html           |  51 +++-
 .../src/main/webapps/hdfs/explorer.js             |  16 +-
 .../src/main/webapps/static/hadoop.css            |   5 +
 11 files changed, 332 insertions(+), 356 deletions(-)
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a352eb124a0..eccf341d7f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -202,6 +202,8 @@ Trunk (Unreleased)
     HDFS-5511. improve CacheManipulator interface to allow better unit testing
     (cmccabe)
 
+    HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 3c92933c01b..0b1e55d46c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -551,9 +551,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             src/main/webapps/static/dust-full-2.0.0.min.js
             src/main/webapps/static/dust-helpers-1.1.1.min.js
             src/main/webapps/static/jquery-1.10.2.min.js
-            src/main/webapps/hdfs/dfshealth.dust.html
-            src/main/webapps/hdfs/explorer-block-info.dust.html
-            src/main/webapps/hdfs/explorer.dust.html
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
index b9febf24fae..e2918362db4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
@@ -59,32 +59,6 @@
   };
   $.extend(dust.filters, filters);
 
-  /**
-   * Load templates from external sources in sequential orders, and
-   * compile them. The loading order is important to resolve dependency.
-   *
-   * The code compile the templates on the client sides, which should be
-   * precompiled once we introduce the infrastructure in the building
-   * system.
-   *
-   * templates is an array of tuples in the format of {url, name}.
-   */
-  function load_templates(dust, templates, success_cb, error_cb) {
-    if (templates.length === 0) {
-      success_cb();
-      return;
-    }
-
-    var t = templates.shift();
-    $.get(t.url, function (tmpl) {
-      var c = dust.compile(tmpl, t.name);
-      dust.loadSource(c);
-      load_templates(dust, templates, success_cb, error_cb);
-    }).error(function (jqxhr, text, err) {
-      error_cb(t.url, jqxhr, text, err);
-    });
-  }
-
   /**
    * Load a sequence of JSON.
    *
@@ -110,7 +84,6 @@
     });
   }
 
-  exports.load_templates = load_templates;
   exports.load_json = load_json;
 
 }($, dust, window));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
deleted file mode 100644
index e7bb5a2b123..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
+++ /dev/null
@@ -1,265 +0,0 @@
-{#nn}
-{@if cond="{DistinctVersionCount} > 1"}
-  There are {DistinctVersionCount} versions of datanodes currently live:
-  {#DistinctVersions}
-    {key} ({value}) {@sep},{/sep}
-  {/DistinctVersions}
-{/if}
-
-{@if cond="{NumberOfMissingBlocks} > 0"}
-  There are {NumberOfMissingBlocks} missing blocks. The following files may be corrupted:
-  {#CorruptFiles}
-    {.}
-  {/CorruptFiles}
-  Please check the logs or run fsck in order to identify the missing blocks. See the Hadoop FAQ for common causes and potential solutions.
-{/if}
-{/nn}
-
-Overview
-{#nn}
-  Started:        {NNStarted}
-  Version:        {Version}
-  Compiled:       {CompileInfo}
-  Cluster ID:     {ClusterId}
-  Block Pool ID:  {BlockPoolId}
-{/nn}
-Browse the filesystem | NameNode Logs
-
-Cluster Summary
-  Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.
-  {#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}
-  {#fs}
-    {TotalLoad} files and directories, {BlocksTotal} blocks = {FilesTotal} total filesystem object(s).
-    {#helper_fs_max_objects/}
-  {/fs}
-  {#mem.HeapMemoryUsage}
-    Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {max|fmt_bytes}.
-  {/mem.HeapMemoryUsage}
-  {#mem.NonHeapMemoryUsage}
-    Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Commited Non Heap Memory. Max Non Heap Memory is {max|fmt_bytes}.
-  {/mem.NonHeapMemoryUsage}
-  {#nn}
-    Configured Capacity:  | {Total|fmt_bytes}
-    DFS Used:             | {Used|fmt_bytes}
-    Non DFS Used:         | {NonDfsUsedSpace|fmt_bytes}
-    DFS Remaining:        | {Free|fmt_bytes}
-    DFS Used%:            | {PercentUsed|fmt_percentage}
-    DFS Remaining%:       | {PercentRemaining|fmt_percentage}
-    Block Pool Used:      | {BlockPoolUsedSpace|fmt_bytes}
-    Block Pool Used%:     | {PercentBlockPoolUsed|fmt_percentage}
-    DataNodes usages% (Min/Median/Max/stdDev): | {#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}
-    Live Nodes            | {NumLiveDataNodes} (Decommissioned: {NumDecomLiveDataNodes})
-    Dead Nodes            | {NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})
-    Decommissioning Nodes | {NumDecommissioningDataNodes}
-  {/nn}
-  {#fs}
-    Number of Under-Replicated Blocks | {UnderReplicatedBlocks}
-  {/fs}
-
-NameNode Journal Status
-  Current transaction ID: {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}
-  Journal Manager | State
-  {#nn.NameJournalStatus}
-    {manager} | {stream}
-  {/nn.NameJournalStatus}
-
-NameNode Storage
-  Storage Directory | Type | State
-  {#nn.NameDirStatuses}
-    {#active}{#helper_dir_status type="Active"/}{/active}
-    {#failed}{#helper_dir_status type="Failed"/}{/failed}
-  {/nn.NameDirStatuses}
-
-Snapshot Summary
-  {#fs.SnapshotStats}
-    Snapshottable directories | Snapshotted directories
-    {SnapshottableDirectories} | {Snapshots}
-  {/fs.SnapshotStats}
-
-{#startup}
-Startup Progress
-  Elapsed Time: {elapsedTime|fmt_time}, Percent Complete: {percentComplete|fmt_percentage}
-  Phase | Completion | Elapsed Time
-  {#phases}
-    {desc} {file} {size|fmt_bytes} | {percentComplete|fmt_percentage} | {elapsedTime|fmt_time}
-    {#steps root_file=file}
-      {stepDesc} {stepFile} {stepSize|fmt_bytes} ({count}/{total}) | {percentComplete|fmt_percentage} |
-    {/steps}
-  {/phases}
-{/startup}
-
-Datanode Information
-  Nodes in operation
-    Node | Last contact | Admin State | Capacity | Used | Non DFS Used | Remaining | Blocks | Block pool used | Failed Volumes
-    {#nn.LiveNodes}
-      {name} ({xferaddr}) | {lastContact} | {adminState} | {capacity|fmt_bytes} | {used|fmt_bytes} | {nonDfsUsedSpace|fmt_bytes} | {remaining|fmt_bytes} | {numBlocks} | {blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage}) | {volfails}
-    {/nn.LiveNodes}
-    {#nn.DeadNodes}
-      {name} ({xferaddr}) | {lastContact} | Dead{?decomissioned}, Decomissioned{/decomissioned} | - | - | - | - | - | - | -
-    {/nn.DeadNodes}
-  Nodes being decomissioned
-    Node | Last contact | Under replicated blocks | Blocks with no live replicas | Under Replicated Blocks In files under construction
-    {#nn.DecomNodes}
-      {name} ({xferaddr}) | {lastContact} | {underReplicatedBlocks} | {decommissionOnlyReplicas} | {underReplicateInOpenFiles}
-    {/nn.DecomNodes}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 0ee78739208..7bd96713395 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -36,6 +36,275 @@
+
+
+
+