HDFS-3849. When re-loading the FSImage, we should clear the existing genStamp and leases. Contributed by Colin Patrick McCabe.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1378364 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-08-28 22:09:27 +00:00
parent 963d01a0af
commit d4d2bf73a9
7 changed files with 105 additions and 3 deletions
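For orientation, a minimal self-contained sketch of the shape of this change. The class, field, and method names below are simplified stand-ins for illustration only (not the real FSImage/FSNamesystem/LeaseManager APIs shown in the diffs that follow), and the generation-stamp constant is a placeholder value rather than the one defined in GenerationStamp.

// Sketch only: re-loading an image must reset ALL in-memory namespace state
// (directory, delegation-token secret manager, generation stamp, leases),
// not just a subset, or stale leases survive the 2NN's re-load.
import java.util.TreeMap;

public class ReloadSketch {
  // Placeholder constant standing in for GenerationStamp.FIRST_VALID_STAMP.
  static final long FIRST_VALID_STAMP = 1000L;

  // Stand-ins for FSDirectory, DelegationTokenSecretManager, the generation
  // stamp, and LeaseManager state.
  private boolean dirLoaded = true;
  private boolean secretsLoaded = true;
  private long generationStamp = 4242L;
  private final TreeMap<String, String> leasesByPath = new TreeMap<String, String>();

  // Mirrors the new FSNamesystem#clear(): drop every piece of loaded state.
  void clear() {
    dirLoaded = false;                    // dir.reset()
    secretsLoaded = false;                // dtSecretManager.reset()
    generationStamp = FIRST_VALID_STAMP;  // generationStamp.setStamp(...)
    leasesByPath.clear();                 // leaseManager.removeAllLeases()
  }

  // Mirrors FSImage#reloadFromImageFile(): before this commit only the first
  // two resets happened, so the generation stamp and leases leaked across
  // re-loads; now everything goes through clear().
  void reloadFromImageFile(String imageFile) {
    clear();
    // ... loading the image would repopulate the namespace here ...
  }

  public static void main(String[] args) {
    ReloadSketch ns = new ReloadSketch();
    ns.leasesByPath.put("/tmpfile", "client1");  // lease left over from an earlier load
    ns.reloadFromImageFile("fsimage_000000042");
    System.out.println("leases after reload: " + ns.leasesByPath.size());  // prints 0
  }
}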

CHANGES.txt

@@ -690,6 +690,9 @@ Branch-2 ( Unreleased changes )
HDFS-3860. HeartbeatManager#Monitor may wrongly hold the writelock of
namesystem. (Jing Zhao via atm)

HDFS-3849. When re-loading the FSImage, we should clear the existing
genStamp and leases. (Colin Patrick McCabe via atm)

BREAKDOWN OF HDFS-3042 SUBTASKS

HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

FSImage.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
@@ -555,9 +556,7 @@ public class FSImage implements Closeable {
   * file.
   */
  void reloadFromImageFile(File file, FSNamesystem target) throws IOException {
    target.dir.reset();
    target.dtSecretManager.reset();
    target.clear();
    LOG.debug("Reloading namespace from " + file);
    loadFSImage(file, target, null);
  }

FSNamesystem.java

@@ -365,6 +365,23 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  private final boolean haEnabled;

  /**
   * Clear all loaded data
   */
  void clear() {
    dir.reset();
    dtSecretManager.reset();
    generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP);
    leaseManager.removeAllLeases();
  }

  @VisibleForTesting
  LeaseManager getLeaseManager() {
    return leaseManager;
  }
  /**
   * Instantiates an FSNamesystem loaded from the image and edits
   * directories specified in the passed Configuration.

LeaseManager.java

@@ -159,6 +159,12 @@ public class LeaseManager {
    }
  }

  synchronized void removeAllLeases() {
    sortedLeases.clear();
    sortedLeasesByPath.clear();
    leases.clear();
  }

  /**
   * Reassign lease for file src to the new holder.
   */

SecondaryNameNode.java

@@ -140,6 +140,11 @@ public class SecondaryNameNode implements Runnable {
  FSImage getFSImage() {
    return checkpointImage;
  }

  @VisibleForTesting
  FSNamesystem getFSNamesystem() {
    return namesystem;
  }

  @VisibleForTesting
  void setFSImage(CheckpointStorage image) {

TestCheckpoint.java

@@ -1924,6 +1924,59 @@ public class TestCheckpoint {
      }
    }
  }

  /**
   * Regression test for HDFS-3849. This makes sure that when we re-load the
   * FSImage in the 2NN, we clear the existing leases.
   */
  @Test
  public void testSecondaryNameNodeWithSavedLeases() throws IOException {
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    FSDataOutputStream fos = null;
    Configuration conf = new HdfsConfiguration();
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
          .format(true).build();
      FileSystem fs = cluster.getFileSystem();
      fos = fs.create(new Path("tmpfile"));
      fos.write(new byte[] { 0, 1, 2, 3 });
      fos.hflush();
      assertEquals(1, cluster.getNamesystem().getLeaseManager().countLease());

      secondary = startSecondaryNameNode(conf);
      assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());

      // Checkpoint once, so the 2NN loads the lease into its in-memory state.
      secondary.doCheckpoint();
      assertEquals(1, secondary.getFSNamesystem().getLeaseManager().countLease());

      fos.close();
      fos = null;

      // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
      // therefore needs to download a new fsimage the next time it performs a
      // checkpoint.
      cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();
      cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      // Ensure that the 2NN can still perform a checkpoint.
      secondary.doCheckpoint();

      // And the leases have been cleared...
      assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());
    } finally {
      if (fos != null) {
        fos.close();
      }
      if (secondary != null) {
        secondary.shutdown();
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  @Test
  public void testCommandLineParsing() throws ParseException {

TestFSNamesystem.java

@@ -26,6 +26,9 @@ import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.junit.Test;

public class TestFSNamesystem {
@@ -45,4 +48,20 @@ public class TestFSNamesystem {
    assertEquals(2, editsDirs.size());
  }

  /**
   * Test that FSNamesystem#clear clears all leases.
   */
  @Test
  public void testFSNamespaceClearLeases() throws Exception {
    Configuration conf = new HdfsConfiguration();
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    DFSTestUtil.formatNameNode(conf);
    FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
    LeaseManager leaseMan = fsn.getLeaseManager();
    leaseMan.addLease("client1", "importantFile");
    assertEquals(1, leaseMan.countLease());
    fsn.clear();
    leaseMan = fsn.getLeaseManager();
    assertEquals(0, leaseMan.countLease());
  }
}