HDFS-2195. Refactor StorageDirectory to not be a non-static inner class. Contributed by Todd Lipcon

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1151707 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2011-07-28 03:19:31 +00:00
parent 6f1ef980de
commit ffbe9e5972
11 changed files with 148 additions and 141 deletions
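The core of the change: StorageDirectory had been a non-static inner class of Storage, so every construction needed an enclosing Storage instance (the storage.new StorageDirectory(root) idiom removed throughout the diff below), and its read/write/analyzeStorage methods reached into that hidden enclosing instance. As a static nested class it is constructed directly, and the owning Storage is passed in explicitly where it is needed (analyzeStorage(startOpt, storage), storage.readProperties(sd), storage.writeProperties(sd)). A minimal, generic sketch of the language-level difference (Outer, Inner and Nested are placeholder names, not the HDFS classes):

public class Outer {
  class Inner {}          // non-static inner class: instances need an enclosing Outer
  static class Nested {}  // static nested class: instances stand on their own

  public static void main(String[] args) {
    Outer outer = new Outer();
    Inner oldStyle = outer.new Inner();  // old idiom, cf. storage.new StorageDirectory(root)
    Nested newStyle = new Nested();      // new idiom, cf. new StorageDirectory(root)
    System.out.println(oldStyle + " " + newStyle);
  }
}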


@ -608,10 +608,13 @@ Trunk (unreleased changes)
HDFS-2149. Move EditLogOp serialization formats into FsEditLogOp
implementations. (Ivan Kelly via todd)
HDFS-2191. Move datanodeMap from FSNamesystem to DatanodeManager.
(szetszwo)
HDFS-2200. Change FSNamesystem.LOG to package private. (szetszwo)
HDFS-2195. Refactor StorageDirectory to not be a non-static inner class.
(todd via eli)
OPTIMIZATIONS


@ -196,7 +196,7 @@ public abstract class Storage extends StorageInfo {
* One of the storage directories.
*/
@InterfaceAudience.Private
public class StorageDirectory {
public static class StorageDirectory {
final File root; // root directory
final boolean useLock; // flag to enable storage lock
final StorageDirType dirType; // storage dir type
@ -237,75 +237,11 @@ public abstract class Storage extends StorageInfo {
*/
public StorageDirType getStorageDirType() {
return dirType;
}
/**
* Read version file.
*
* @throws IOException if file cannot be read or contains inconsistent data
*/
public void read() throws IOException {
read(getVersionFile());
}
public void read(File from) throws IOException {
Properties props = readFrom(from);
getFields(props, this);
}
public Properties readFrom(File from) throws IOException {
RandomAccessFile file = new RandomAccessFile(from, "rws");
FileInputStream in = null;
Properties props = new Properties();
try {
in = new FileInputStream(file.getFD());
file.seek(0);
props.load(in);
} finally {
if (in != null) {
in.close();
}
file.close();
}
return props;
}
}
/**
* Write version file.
*
* @throws IOException
*/
public void write() throws IOException {
write(getVersionFile());
}
public void write(File to) throws IOException {
Properties props = new Properties();
setFields(props, this);
RandomAccessFile file = new RandomAccessFile(to, "rws");
FileOutputStream out = null;
try {
file.seek(0);
out = new FileOutputStream(file.getFD());
/*
* If server is interrupted before this line,
* the version file will remain unchanged.
*/
props.store(out, null);
/*
* Now the new fields are flushed to the head of the file, but file
* length can still be larger than required and therefore the file can
* contain whole or corrupted fields from its old contents at the end.
* If server is interrupted here and restarted later these extra fields
* either should not affect server behavior or should be handled
* by the server correctly.
*/
file.setLength(out.getChannel().position());
} finally {
if (out != null) {
out.close();
}
file.close();
}
public void read(File from, Storage storage) throws IOException {
Properties props = readPropertiesFile(from);
storage.setFieldsFromProperties(props, this);
}
/**
@ -457,7 +393,8 @@ public abstract class Storage extends StorageInfo {
* consistent and cannot be recovered.
* @throws IOException
*/
public StorageState analyzeStorage(StartupOption startOpt) throws IOException {
public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
throws IOException {
assert root != null : "root is null";
String rootPath = root.getCanonicalPath();
try { // check that storage exists
@ -489,8 +426,9 @@ public abstract class Storage extends StorageInfo {
if (startOpt == HdfsConstants.StartupOption.FORMAT)
return StorageState.NOT_FORMATTED;
if (startOpt != HdfsConstants.StartupOption.IMPORT) {
checkOldLayoutStorage(this);
storage.checkOldLayoutStorage(this);
}
// check whether current directory is valid
@ -754,9 +692,8 @@ public abstract class Storage extends StorageInfo {
* @param props
* @throws IOException
*/
protected void getFields(Properties props,
StorageDirectory sd
) throws IOException {
protected void setFieldsFromProperties(
Properties props, StorageDirectory sd) throws IOException {
setLayoutVersion(props, sd);
setNamespaceID(props, sd);
setStorageType(props, sd);
@ -765,15 +702,14 @@ public abstract class Storage extends StorageInfo {
}
/**
* Set common storage fields.
* Set common storage fields into the given properties object.
* Should be overloaded if additional fields need to be set.
*
* @param props
* @throws IOException
* @param props the Properties object to write into
*/
protected void setFields(Properties props,
StorageDirectory sd
) throws IOException {
protected void setPropertiesFromFields(Properties props,
StorageDirectory sd)
throws IOException {
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("storageType", storageType.toString());
props.setProperty("namespaceID", String.valueOf(namespaceID));
@ -784,6 +720,77 @@ public abstract class Storage extends StorageInfo {
props.setProperty("cTime", String.valueOf(cTime));
}
/**
* Read properties from the VERSION file in the given storage directory.
*/
public void readProperties(StorageDirectory sd) throws IOException {
Properties props = readPropertiesFile(sd.getVersionFile());
setFieldsFromProperties(props, sd);
}
/**
* Read properties from the previous/VERSION file in the given storage directory.
*/
public void readPreviousVersionProperties(StorageDirectory sd)
throws IOException {
Properties props = readPropertiesFile(sd.getPreviousVersionFile());
setFieldsFromProperties(props, sd);
}
/**
* Write properties to the VERSION file in the given storage directory.
*/
public void writeProperties(StorageDirectory sd) throws IOException {
writeProperties(sd.getVersionFile(), sd);
}
public void writeProperties(File to, StorageDirectory sd) throws IOException {
Properties props = new Properties();
setPropertiesFromFields(props, sd);
RandomAccessFile file = new RandomAccessFile(to, "rws");
FileOutputStream out = null;
try {
file.seek(0);
out = new FileOutputStream(file.getFD());
/*
* If server is interrupted before this line,
* the version file will remain unchanged.
*/
props.store(out, null);
/*
* Now the new fields are flushed to the head of the file, but file
* length can still be larger than required and therefore the file can
* contain whole or corrupted fields from its old contents at the end.
* If server is interrupted here and restarted later these extra fields
* either should not affect server behavior or should be handled
* by the server correctly.
*/
file.setLength(out.getChannel().position());
} finally {
if (out != null) {
out.close();
}
file.close();
}
}
public static Properties readPropertiesFile(File from) throws IOException {
RandomAccessFile file = new RandomAccessFile(from, "rws");
FileInputStream in = null;
Properties props = new Properties();
try {
in = new FileInputStream(file.getFD());
file.seek(0);
props.load(in);
} finally {
if (in != null) {
in.close();
}
file.close();
}
return props;
}
public static void rename(File from, File to) throws IOException {
if (!from.renameTo(to))
throw new IOException("Failed to rename "
@ -808,7 +815,7 @@ public abstract class Storage extends StorageInfo {
public void writeAll() throws IOException {
this.layoutVersion = FSConstants.LAYOUT_VERSION;
for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
it.next().write();
writeProperties(it.next());
}
}
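The block comments in writeProperties above carry the crash-safety reasoning: properties are written at the head of the file through the RandomAccessFile descriptor, and the file is then truncated to the output channel position so stale bytes from a longer previous VERSION file cannot linger past the new contents. A standalone sketch of that pattern, lifted from the method above with hypothetical class and file names:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Properties;

public class TruncatingPropertiesWriter {
  // Write props at offset 0, then truncate so nothing from an older, longer
  // version of the file can survive past the freshly written contents.
  public static void write(File to, Properties props) throws IOException {
    RandomAccessFile file = new RandomAccessFile(to, "rws");
    FileOutputStream out = null;
    try {
      file.seek(0);
      out = new FileOutputStream(file.getFD());
      props.store(out, null);
      file.setLength(out.getChannel().position());
    } finally {
      if (out != null) {
        out.close();
      }
      file.close();
    }
  }

  public static void main(String[] args) throws IOException {
    Properties p = new Properties();
    p.setProperty("layoutVersion", "-38");  // illustrative key/value only
    write(new File("VERSION.example"), p);
  }
}

Opening in "rws" mode additionally asks that content and metadata updates be pushed to the device synchronously, which the read and write helpers above both rely on.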


@ -102,7 +102,7 @@ public class BlockPoolSliceStorage extends Storage {
StorageDirectory sd = new StorageDirectory(dataDir, null, false);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch (curState) {
case NORMAL:
@ -176,7 +176,7 @@ public class BlockPoolSliceStorage extends Storage {
this.namespaceID = nsInfo.getNamespaceID();
this.blockpoolID = nsInfo.getBlockPoolID();
this.storageType = NodeType.DATA_NODE;
bpSdir.write();
writeProperties(bpSdir);
}
/**
@ -184,7 +184,7 @@ public class BlockPoolSliceStorage extends Storage {
* VERSION file
*/
@Override
protected void setFields(Properties props, StorageDirectory sd)
protected void setPropertiesFromFields(Properties props, StorageDirectory sd)
throws IOException {
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("namespaceID", String.valueOf(namespaceID));
@ -208,7 +208,7 @@ public class BlockPoolSliceStorage extends Storage {
}
@Override
protected void getFields(Properties props, StorageDirectory sd)
protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
throws IOException {
setLayoutVersion(props, sd);
setNamespaceID(props, sd);
@ -237,7 +237,7 @@ public class BlockPoolSliceStorage extends Storage {
if (startOpt == StartupOption.ROLLBACK)
doRollback(sd, nsInfo); // rollback if applicable
sd.read();
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
assert this.layoutVersion >= FSConstants.LAYOUT_VERSION
: "Future version is not allowed";
@ -331,7 +331,7 @@ public class BlockPoolSliceStorage extends Storage {
assert this.namespaceID == nsInfo.getNamespaceID()
: "Data-node and name-node layout versions must be the same.";
this.cTime = nsInfo.getCTime();
bpSd.write();
writeProperties(bpSd);
// 4. Rename <SD>/current/<bpid>/previous.tmp to <SD>/current/<bpid>/previous
rename(bpTmpDir, bpPrevDir);
@ -383,8 +383,7 @@ public class BlockPoolSliceStorage extends Storage {
return;
// read attributes out of the VERSION file of previous directory
DataStorage prevInfo = new DataStorage();
StorageDirectory prevSD = prevInfo.new StorageDirectory(bpSd.getRoot());
prevSD.read(prevSD.getPreviousVersionFile());
prevInfo.readPreviousVersionProperties(bpSd);
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
@ -392,7 +391,7 @@ public class BlockPoolSliceStorage extends Storage {
// && ( DN.previousCTime <= NN.ctime)
if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION &&
prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
throw new InconsistentFSStateException(prevSD.getRoot(),
throw new InconsistentFSStateException(bpSd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
+ prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+ " is newer than the namespace state: LV = "


@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.StringUtils;
/**
* Data storage information file.
@ -153,7 +152,7 @@ public class DataStorage extends Storage {
StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch(curState) {
case NORMAL:
@ -274,7 +273,7 @@ public class DataStorage extends Storage {
this.namespaceID = nsInfo.getNamespaceID();
this.cTime = 0;
// store storageID as it currently is
sd.write();
writeProperties(sd);
}
/*
@ -282,7 +281,7 @@ public class DataStorage extends Storage {
* DataStorage VERSION file
*/
@Override
protected void setFields(Properties props,
protected void setPropertiesFromFields(Properties props,
StorageDirectory sd
) throws IOException {
props.setProperty("storageType", storageType.toString());
@ -301,7 +300,7 @@ public class DataStorage extends Storage {
* DataStorage VERSION file and verify them.
*/
@Override
protected void getFields(Properties props, StorageDirectory sd)
protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
throws IOException {
setLayoutVersion(props, sd);
setcTime(props, sd);
@ -373,7 +372,7 @@ public class DataStorage extends Storage {
if (startOpt == StartupOption.ROLLBACK) {
doRollback(sd, nsInfo); // rollback if applicable
}
sd.read();
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
"Future version is not allowed";
@ -448,7 +447,7 @@ public class DataStorage extends Storage {
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
clusterID = nsInfo.getClusterID();
layoutVersion = nsInfo.getLayoutVersion();
sd.write();
writeProperties(sd);
return;
}
@ -485,7 +484,7 @@ public class DataStorage extends Storage {
// 4. Write version file under <SD>/current
layoutVersion = FSConstants.LAYOUT_VERSION;
clusterID = nsInfo.getClusterID();
sd.write();
writeProperties(sd);
// 5. Rename <SD>/previous.tmp to <SD>/previous
rename(tmpDir, prevDir);
@ -539,14 +538,13 @@ public class DataStorage extends Storage {
if (!prevDir.exists())
return;
DataStorage prevInfo = new DataStorage();
StorageDirectory prevSD = prevInfo.new StorageDirectory(sd.getRoot());
prevSD.read(prevSD.getPreviousVersionFile());
prevInfo.readPreviousVersionProperties(sd);
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
&& prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
throw new InconsistentFSStateException(prevSD.getRoot(),
throw new InconsistentFSStateException(sd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
+ prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+ " is newer than the namespace state: LV = "


@ -93,7 +93,7 @@ public class BackupImage extends FSImage {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
@ -112,7 +112,8 @@ public class BackupImage extends FSImage {
sd.doRecover(curState);
}
if(curState != StorageState.NOT_FORMATTED) {
sd.read(); // read and verify consistency with other directories
// read and verify consistency with other directories
storage.readProperties(sd);
}
} catch(IOException ioe) {
sd.unlock();


@ -220,7 +220,7 @@ public class FSImage implements NNStorageListener, Closeable {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
curState = sd.analyzeStorage(startOpt, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
@ -236,7 +236,8 @@ public class FSImage implements NNStorageListener, Closeable {
}
if (curState != StorageState.NOT_FORMATTED
&& startOpt != StartupOption.ROLLBACK) {
sd.read(); // read and verify consistency with other directories
// read and verify consistency with other directories
storage.readProperties(sd);
isFormatted = true;
}
if (startOpt == StartupOption.IMPORT && isFormatted)
@ -420,14 +421,14 @@ public class FSImage implements NNStorageListener, Closeable {
if (!prevDir.exists()) { // use current directory then
LOG.info("Storage directory " + sd.getRoot()
+ " does not contain previous fs state.");
sd.read(); // read and verify consistency with other directories
// read and verify consistency with other directories
storage.readProperties(sd);
continue;
}
StorageDirectory sdPrev
= prevState.getStorage().new StorageDirectory(sd.getRoot());
// read and verify consistency of the prev dir
sdPrev.read(sdPrev.getPreviousVersionFile());
prevState.getStorage().readPreviousVersionProperties(sd);
if (prevState.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
throw new IOException(
"Cannot rollback to storage version " +
@ -695,7 +696,7 @@ public class FSImage implements NNStorageListener, Closeable {
//
// Load in bits
//
latestNameSD.read();
storage.readProperties(latestNameSD);
needToSave |= loadFSImage(NNStorage.getStorageFile(latestNameSD,
NameNodeFile.IMAGE));
@ -976,7 +977,7 @@ public class FSImage implements NNStorageListener, Closeable {
editLog.createEditLogFile(NNStorage.getStorageFile(sd,
NameNodeFile.EDITS));
// write version and time files
sd.write();
storage.writeProperties(sd);
}
@ -1076,7 +1077,7 @@ public class FSImage implements NNStorageListener, Closeable {
+ imageFile.getCanonicalPath());
}
try {
sd.write();
storage.writeProperties(sd);
} catch (IOException e) {
LOG.error("Cannot write file " + sd.getRoot(), e);


@ -582,7 +582,7 @@ public class NNStorage extends Storage implements Closeable {
for (NNStorageListener listener : listeners) {
listener.formatOccurred(sd);
}
sd.write();
writeProperties(sd);
LOG.info("Storage directory " + sd.getRoot()
+ " has been successfully formatted.");
@ -669,10 +669,9 @@ public class NNStorage extends Storage implements Closeable {
}
@Override // Storage
protected void getFields(Properties props,
StorageDirectory sd
) throws IOException {
super.getFields(props, sd);
protected void setFieldsFromProperties(
Properties props, StorageDirectory sd) throws IOException {
super.setFieldsFromProperties(props, sd);
if (layoutVersion == 0) {
throw new IOException("NameNode directory "
+ sd.getRoot() + " is not formatted.");
@ -719,10 +718,10 @@ public class NNStorage extends Storage implements Closeable {
* @throws IOException
*/
@Override // Storage
protected void setFields(Properties props,
protected void setPropertiesFromFields(Properties props,
StorageDirectory sd
) throws IOException {
super.setFields(props, sd);
super.setPropertiesFromFields(props, sd);
// Set blockpoolID in version with federation support
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
props.setProperty("blockpoolID", blockpoolID);
@ -1003,7 +1002,7 @@ public class NNStorage extends Storage implements Closeable {
while(sdit.hasNext()) {
StorageDirectory sd = sdit.next();
try {
Properties props = sd.readFrom(sd.getVersionFile());
Properties props = readPropertiesFile(sd.getVersionFile());
cid = props.getProperty("clusterID");
LOG.info("current cluster id for sd="+sd.getCurrentDir() +
";lv=" + layoutVersion + ";cid=" + cid);


@ -645,7 +645,7 @@ public class SecondaryNameNode implements Runnable {
"cannot access checkpoint directory.");
StorageState curState;
try {
curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:


@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -252,7 +253,9 @@ public class TestDFSUpgradeFromImage extends TestCase {
.build();
fail("Was able to start NN from 0.3.0 image");
} catch (IOException ioe) {
assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
if (!ioe.toString().contains("Old layout version is 'too old'")) {
throw ioe;
}
}
}
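The reworked catch block above rethrows any IOException whose message does not contain the expected "too old" text, rather than asserting on the string; an unexpected failure then surfaces in the test report with its original stack trace instead of a bare assertion failure. The same pattern in isolation, with hypothetical class and method names:

import java.io.IOException;

public class ExpectedFailureExample {
  static void startFromTooOldImage() throws IOException {
    // Stand-in for the NameNode startup that is expected to fail.
    throw new IOException("Old layout version is 'too old'");
  }

  public static void main(String[] args) throws IOException {
    try {
      startFromTooOldImage();
      throw new AssertionError("expected an IOException");
    } catch (IOException ioe) {
      // Swallow only the failure we expect; anything else propagates
      // with its full stack trace, which is far easier to diagnose.
      if (!ioe.toString().contains("Old layout version is 'too old'")) {
        throw ioe;
      }
    }
  }
}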


@ -407,16 +407,13 @@ public class UpgradeUtilities {
*/
public static File[] createNameNodeVersionFile(Configuration conf,
File[] parent, StorageInfo version, String bpid) throws IOException {
Storage storage = null;
Storage storage = new NNStorage(conf);
storage.setStorageInfo(version);
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {
File versionFile = new File(parent[i], "VERSION");
FileUtil.fullyDelete(versionFile);
storage = new NNStorage(conf);
storage.setStorageInfo(version);
StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
sd.write(versionFile);
versionFiles[i] = versionFile;
versionFiles[i] = new File(parent[i], "VERSION");
StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
storage.writeProperties(versionFiles[i], sd);
}
return versionFiles;
}
@ -449,14 +446,13 @@ public class UpgradeUtilities {
*/
public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid, String bpidToWrite) throws IOException {
DataStorage storage = null;
DataStorage storage = new DataStorage(version, "doNotCare");
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {
File versionFile = new File(parent[i], "VERSION");
FileUtil.fullyDelete(versionFile);
storage = new DataStorage(version, "doNotCare");
StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
sd.write(versionFile);
StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
storage.writeProperties(versionFile, sd);
versionFiles[i] = versionFile;
File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
createBlockPoolVersionFile(bpDir, version, bpidToWrite);
@ -471,9 +467,8 @@ public class UpgradeUtilities {
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
bpid);
File versionFile = new File(bpCurDir, "VERSION");
FileUtil.fullyDelete(versionFile);
StorageDirectory sd = bpStorage.new StorageDirectory(bpDir);
sd.write(versionFile);
StorageDirectory sd = new StorageDirectory(bpDir);
bpStorage.writeProperties(versionFile, sd);
}
}


@ -35,6 +35,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.junit.After;
import org.junit.Before;
@ -54,7 +55,7 @@ public class TestClusterId {
Iterator<StorageDirectory> sdit =
fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
StorageDirectory sd = sdit.next();
Properties props = sd.readFrom(sd.getVersionFile());
Properties props = Storage.readPropertiesFile(sd.getVersionFile());
String cid = props.getProperty("clusterID");
LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
return cid;