HDFS-15580. [JDK 12] DFSTestUtil#addDataNodeLayoutVersion fails (#2309)

Author: Akira Ajisaka, 2020-10-27 19:18:08 +09:00 (committed by GitHub)
Parent: afaab3d332
Commit: 3848b44b37
12 changed files with 92 additions and 94 deletions
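
Context for the change: DFSTestUtil#addDataNodeLayoutVersion used to rewrite the
static final constants DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION and
HdfsServerConstants.DATANODE_LAYOUT_VERSION through reflection. Since JDK 12,
java.lang.reflect.Field no longer exposes its own "modifiers" field to reflective
lookup, so the first step of that trick throws NoSuchFieldException and the test
utility fails. A minimal sketch of the removed pattern (lv is the target test
layout version; the full code is in the DFSTestUtil hunk below):

    // Pre-JDK 12 hack: strip `final` from a static final field, then overwrite it.
    Field modifiersField = Field.class.getDeclaredField("modifiers"); // NoSuchFieldException on JDK 12+
    modifiersField.setAccessible(true);
    Field field = DataNodeLayoutVersion.class.getField("CURRENT_LAYOUT_VERSION");
    field.setAccessible(true);
    modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    field.setInt(null, lv);

The commit removes the need for reflection: the constants are replaced by a
private static int behind DataNodeLayoutVersion#getCurrentLayoutVersion() plus a
@VisibleForTesting setter.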

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

@@ -61,12 +60,6 @@ public interface HdfsServerConstants {
    */
   int NAMENODE_LAYOUT_VERSION
       = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
-  /**
-   * Current layout version for DataNode.
-   * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version.
-   */
-  int DATANODE_LAYOUT_VERSION
-      = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
   /**
    * Path components that are reserved in HDFS.
    * <p>

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java

@@ -224,7 +224,8 @@ public class StorageInfo {
   }

   public int getServiceLayoutVersion() {
-    return storageType == NodeType.DATA_NODE ? HdfsServerConstants.DATANODE_LAYOUT_VERSION
+    return storageType == NodeType.DATA_NODE
+        ? DataNodeLayoutVersion.getCurrentLayoutVersion()
         : HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;

@@ -285,7 +284,7 @@ public class BlockPoolSliceStorage extends Storage {
     LOG.info("Formatting block pool {} directory {}", blockpoolID, bpSdir
         .getCurrentDir());
     bpSdir.clearDirectory(); // create directory
-    this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
+    this.layoutVersion = DataNodeLayoutVersion.getCurrentLayoutVersion();
     this.cTime = nsInfo.getCTime();
     this.namespaceID = nsInfo.getNamespaceID();
     this.blockpoolID = nsInfo.getBlockPoolID();

@@ -388,7 +387,7 @@ public class BlockPoolSliceStorage extends Storage {
     }
     readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
-    assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION
+    assert this.layoutVersion >= DataNodeLayoutVersion.getCurrentLayoutVersion()
         : "Future version is not allowed";
     if (getNamespaceID() != nsInfo.getNamespaceID()) {
       throw new IOException("Incompatible namespaceIDs in "

@@ -402,17 +401,17 @@ public class BlockPoolSliceStorage extends Storage {
           + nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
           + blockpoolID);
     }
-    if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION
+    if (this.layoutVersion == DataNodeLayoutVersion.getCurrentLayoutVersion()
         && this.cTime == nsInfo.getCTime()) {
       return false; // regular startup
     }
-    if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
+    if (this.layoutVersion > DataNodeLayoutVersion.getCurrentLayoutVersion()) {
       int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
       LOG.info("Restored {} block files from trash " +
           "before the layout upgrade. These blocks will be moved to " +
           "the previous directory during the upgrade", restored);
     }
-    if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION
+    if (this.layoutVersion > DataNodeLayoutVersion.getCurrentLayoutVersion()
         || this.cTime < nsInfo.getCTime()) {
       doUpgrade(sd, nsInfo, callables, conf); // upgrade
       return true;

@@ -459,8 +458,8 @@ public class BlockPoolSliceStorage extends Storage {
     final int oldLV = getLayoutVersion();
     LOG.info("Upgrading block pool storage directory {}.\n old LV = {}; old"
         + " CTime = {}.\n new LV = {}; new CTime = {}",
-        bpSd.getRoot(), oldLV, this.getCTime(), HdfsServerConstants
-        .DATANODE_LAYOUT_VERSION, nsInfo.getCTime());
+        bpSd.getRoot(), oldLV, this.getCTime(),
+        DataNodeLayoutVersion.getCurrentLayoutVersion(), nsInfo.getCTime());
     // get <SD>/previous directory
     String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
     StorageDirectory dnSdStorage = new StorageDirectory(new File(dnRoot));

@@ -507,7 +506,7 @@ public class BlockPoolSliceStorage extends Storage {
       throws IOException {
     // 3. Create new <SD>/current with block files hardlinks and VERSION
     linkAllBlocks(bpTmpDir, bpCurDir, oldLV, conf);
-    this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
+    this.layoutVersion = DataNodeLayoutVersion.getCurrentLayoutVersion();
     assert this.namespaceID == nsInfo.getNamespaceID()
         : "Data-node and name-node layout versions must be the same.";
     this.cTime = nsInfo.getCTime();

@@ -616,13 +615,15 @@ public class BlockPoolSliceStorage extends Storage {
     // the namespace state or can be further upgraded to it.
     // In another word, we can only roll back when ( storedLV >= software LV)
     // && ( DN.previousCTime <= NN.ctime)
-    if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION &&
+    if (!(prevInfo.getLayoutVersion() >=
+        DataNodeLayoutVersion.getCurrentLayoutVersion() &&
         prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
       throw new InconsistentFSStateException(bpSd.getRoot(),
           "Cannot rollback to a newer state.\nDatanode previous state: LV = "
               + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
               + " is newer than the namespace state: LV = "
-              + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime());
+              + DataNodeLayoutVersion.getCurrentLayoutVersion() + " CTime = "
+              + nsInfo.getCTime());
     }
     LOG.info("Rolling back storage directory {}.\n target LV = {}; target "

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1579,7 +1579,7 @@ public class DataNode extends ReconfigurableBase
     if (storageInfo == null) {
       // it's null in the case of SimulatedDataSet
       storageInfo = new StorageInfo(
-          DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
+          DataNodeLayoutVersion.getCurrentLayoutVersion(),
           nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
           NodeType.DATA_NODE);
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java

@@ -26,15 +26,36 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
+import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;

 @InterfaceAudience.Private
 public class DataNodeLayoutVersion {
   /** Build layout version and corresponding feature matrix */
   public final static Map<Integer, SortedSet<LayoutFeature>> FEATURES =
       new HashMap<Integer, SortedSet<LayoutFeature>>();

-  public static final int CURRENT_LAYOUT_VERSION
+  private static int currentLayoutVersion
       = LayoutVersion.getCurrentLayoutVersion(Feature.values());

+  /**
+   * Method for testing rolling upgrade.
+   * Do not call this method from production.
+   *
+   * @param lv new layout version to set
+   */
+  @VisibleForTesting
+  static void setCurrentLayoutVersionForTesting(int lv) {
+    currentLayoutVersion = lv;
+  }
+
+  /**
+   * Get current layout version of the DataNode.
+   * @return the current layout version of the DataNode
+   */
+  public static int getCurrentLayoutVersion() {
+    return currentLayoutVersion;
+  }
+
   static{
     LayoutVersion.updateMap(FEATURES, LayoutVersion.Feature.values());
     LayoutVersion.updateMap(FEATURES, DataNodeLayoutVersion.Feature.values());
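
With the constant gone, production code reads the version through the getter,
and tests in the same package can lower it directly; a minimal usage sketch
(this mirrors the addDataNodeLayoutVersion helper added to
TestDataNodeRollingUpgrade at the end of this commit):

    // Pretend the software layout version is one step older than the real one.
    int lv = DataNodeLayoutVersion.getCurrentLayoutVersion() - 1;
    DataNodeLayoutVersion.setCurrentLayoutVersionForTesting(lv);
    assert DataNodeLayoutVersion.getCurrentLayoutVersion() == lv;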

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -53,7 +53,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;

@@ -571,7 +570,7 @@ public class DataStorage extends Storage {
   void format(StorageDirectory sd, NamespaceInfo nsInfo,
       String newDatanodeUuid, Configuration conf) throws IOException {
     sd.clearDirectory(); // create directory
-    this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
+    this.layoutVersion = DataNodeLayoutVersion.getCurrentLayoutVersion();
     this.clusterID = nsInfo.getClusterID();
     this.namespaceID = nsInfo.getNamespaceID();
     this.cTime = 0;

@@ -728,8 +727,9 @@ public class DataStorage extends Storage {
     }
     readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
-    assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION :
-        "Future version is not allowed";
+    assert this.layoutVersion >=
+        DataNodeLayoutVersion.getCurrentLayoutVersion() :
+        "Future version is not allowed";

     boolean federationSupported =
         DataNodeLayoutVersion.supports(

@@ -752,13 +752,13 @@ public class DataStorage extends Storage {
     }

     // regular start up.
-    if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
+    if (this.layoutVersion == DataNodeLayoutVersion.getCurrentLayoutVersion()) {
       createStorageID(sd, layoutVersion, conf);
       return false; // need to write properties
     }

     // do upgrade
-    if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
+    if (this.layoutVersion > DataNodeLayoutVersion.getCurrentLayoutVersion()) {
       if (federationSupported) {
         // If the existing on-disk layout version supports federation,
         // simply update the properties.

@@ -775,7 +775,7 @@ public class DataStorage extends Storage {
     // failed.
     throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
         + " is newer than the supported LV = "
-        + HdfsServerConstants.DATANODE_LAYOUT_VERSION);
+        + DataNodeLayoutVersion.getCurrentLayoutVersion());
   }

   /**

@@ -807,7 +807,7 @@ public class DataStorage extends Storage {
     final int oldLV = getLayoutVersion();
     LOG.info("Upgrading storage directory {}.\n old LV = {}; old CTime = {}"
         + ".\n new LV = {}; new CTime = {}", sd.getRoot(), oldLV,
-        this.getCTime(), HdfsServerConstants.DATANODE_LAYOUT_VERSION,
+        this.getCTime(), DataNodeLayoutVersion.getCurrentLayoutVersion(),
         nsInfo.getCTime());

     final File curDir = sd.getCurrentDir();

@@ -868,9 +868,9 @@ public class DataStorage extends Storage {
       throws IOException {
     createStorageID(sd, layoutVersion, conf);
     LOG.info("Updating layout version from {} to {} for storage {}",
-        layoutVersion, HdfsServerConstants.DATANODE_LAYOUT_VERSION,
+        layoutVersion, DataNodeLayoutVersion.getCurrentLayoutVersion(),
         sd.getRoot());
-    layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
+    layoutVersion = DataNodeLayoutVersion.getCurrentLayoutVersion();
     writeProperties(sd);
   }

@@ -922,11 +922,11 @@ public class DataStorage extends Storage {
     // This is a regular startup or a post-federation rollback
     if (!prevDir.exists()) {
       if (DataNodeLayoutVersion.supports(LayoutVersion.Feature.FEDERATION,
-          HdfsServerConstants.DATANODE_LAYOUT_VERSION)) {
-        readProperties(sd, HdfsServerConstants.DATANODE_LAYOUT_VERSION);
+          DataNodeLayoutVersion.getCurrentLayoutVersion())) {
+        readProperties(sd, DataNodeLayoutVersion.getCurrentLayoutVersion());
         writeProperties(sd);
         LOG.info("Layout version rolled back to {} for storage {}",
-            HdfsServerConstants.DATANODE_LAYOUT_VERSION, sd.getRoot());
+            DataNodeLayoutVersion.getCurrentLayoutVersion(), sd.getRoot());
       }
       return;
     }

@@ -935,17 +935,19 @@ public class DataStorage extends Storage {
     // We allow rollback to a state, which is either consistent with
     // the namespace state or can be further upgraded to it.
-    if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION
-        && prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
+    if (!(prevInfo.getLayoutVersion() >=
+        DataNodeLayoutVersion.getCurrentLayoutVersion()
+        && prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
       throw new InconsistentFSStateException(sd.getRoot(),
           "Cannot rollback to a newer state.\nDatanode previous state: LV = "
               + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
               + " is newer than the namespace state: LV = "
-              + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = "
+              + DataNodeLayoutVersion.getCurrentLayoutVersion() + " CTime = "
               + nsInfo.getCTime());
+    }
     LOG.info("Rolling back storage directory {}.\n target LV = {}; target "
         + "CTime = {}", sd.getRoot(),
-        HdfsServerConstants.DATANODE_LAYOUT_VERSION, nsInfo.getCTime());
+        DataNodeLayoutVersion.getCurrentLayoutVersion(), nsInfo.getCTime());
     File tmpDir = sd.getRemovedTmp();
     assert !tmpDir.exists() : "removed.tmp directory must not exist.";
     // rename current to tmp

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -45,8 +45,6 @@ import java.io.InputStream;
 import java.io.InterruptedIOException;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.Socket;

@@ -131,7 +129,6 @@ import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;

@@ -147,12 +144,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

@@ -1953,39 +1948,6 @@ public class DFSTestUtil {
     FsShellRun(cmd, 0, null, conf);
   }

-  public static void addDataNodeLayoutVersion(final int lv, final String description)
-      throws NoSuchFieldException, IllegalAccessException {
-    Preconditions.checkState(lv < DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-
-    // Override {@link DataNodeLayoutVersion#CURRENT_LAYOUT_VERSION} via reflection.
-    Field modifiersField = Field.class.getDeclaredField("modifiers");
-    modifiersField.setAccessible(true);
-    Field field = DataNodeLayoutVersion.class.getField("CURRENT_LAYOUT_VERSION");
-    field.setAccessible(true);
-    modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
-    field.setInt(null, lv);
-
-    field = HdfsServerConstants.class.getField("DATANODE_LAYOUT_VERSION");
-    field.setAccessible(true);
-    modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
-    field.setInt(null, lv);
-
-    // Inject the feature into the FEATURES map.
-    final LayoutVersion.FeatureInfo featureInfo =
-        new LayoutVersion.FeatureInfo(lv, lv + 1, description, false);
-    final LayoutVersion.LayoutFeature feature =
-        new LayoutVersion.LayoutFeature() {
-      @Override
-      public LayoutVersion.FeatureInfo getInfo() {
-        return featureInfo;
-      }
-    };
-
-    // Update the FEATURES map with the new layout version.
-    LayoutVersion.updateMap(DataNodeLayoutVersion.FEATURES,
-        new LayoutVersion.LayoutFeature[] { feature });
-  }
-
   /**
    * Wait for datanode to reach alive or dead state for waitTime given in
    * milliseconds.

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -30,10 +30,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.StringUtils;

@@ -191,7 +191,7 @@ public class TestDFSRollback {
         UpgradeUtilities.getCurrentBlockPoolID(cluster));
     // Put newer layout version in current.
     storageInfo = new StorageInfo(
-        HdfsServerConstants.DATANODE_LAYOUT_VERSION - 1,
+        DataNodeLayoutVersion.getCurrentLayoutVersion() - 1,
         UpgradeUtilities.getCurrentNamespaceID(cluster),
         UpgradeUtilities.getCurrentClusterID(cluster),
         UpgradeUtilities.getCurrentFsscTime(cluster),

@@ -277,7 +277,8 @@
     UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
     baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
-    storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
+    storageInfo = new StorageInfo(
+        DataNodeLayoutVersion.getCurrentLayoutVersion(),
         UpgradeUtilities.getCurrentNamespaceID(cluster),
         UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
         NodeType.DATA_NODE);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;

 import org.junit.After;
 import org.junit.Test;

@@ -89,7 +90,7 @@ public class TestDFSStartupVersions {
    */
   private StorageData[] initializeVersions() throws Exception {
     int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
-    int layoutVersionCur = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
+    int layoutVersionCur = DataNodeLayoutVersion.getCurrentLayoutVersion();
     int layoutVersionNew = Integer.MIN_VALUE;
     int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
     int namespaceIdOld = Integer.MIN_VALUE;

@@ -200,7 +201,7 @@ public class TestDFSStartupVersions {
       return false;
     }
     // check #3
-    int softwareLV = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
+    int softwareLV = DataNodeLayoutVersion.getCurrentLayoutVersion();
     int storedLV = datanodeVer.getLayoutVersion();
     if (softwareLV == storedLV &&
         datanodeVer.getCTime() == namenodeVer.getCTime())

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -36,12 +36,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;

@@ -303,7 +303,8 @@
     UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
     cluster = createCluster();
     baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
-    storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
+    storageInfo = new StorageInfo(
+        DataNodeLayoutVersion.getCurrentLayoutVersion(),
         UpgradeUtilities.getCurrentNamespaceID(cluster),
         UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
         NodeType.DATA_NODE);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java

@@ -28,10 +28,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

@@ -191,8 +191,8 @@
         .getCTime();
     StorageInfo mockStorageInfo = mock(StorageInfo.class);
     doReturn(nnCTime).when(mockStorageInfo).getCTime();
-    doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
-        .getLayoutVersion();
+    doReturn(DataNodeLayoutVersion.getCurrentLayoutVersion())
+        .when(mockStorageInfo).getLayoutVersion();
     DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
         mockStorageInfo, null, VersionInfo.getVersion());
     rpcServer.registerDatanode(dnReg);

@@ -236,7 +236,8 @@
     doReturn(nnCTime).when(mockStorageInfo).getCTime();

     DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-    doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
+    doReturn(DataNodeLayoutVersion.getCurrentLayoutVersion())
+        .when(mockDnReg).getVersion();
     doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
     doReturn(123).when(mockDnReg).getXferPort();
     doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();

@@ -285,7 +286,8 @@
     doReturn(nnCTime).when(mockStorageInfo).getCTime();

     DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-    doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
+    doReturn(DataNodeLayoutVersion.getCurrentLayoutVersion())
+        .when(mockDnReg).getVersion();
     doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
     doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.TestRollingUpgrade;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;

@@ -361,9 +362,8 @@
     // Restart the DN with a new layout version to trigger layout upgrade.
     LOG.info("Shutting down the Datanode");
     MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
-    DFSTestUtil.addDataNodeLayoutVersion(
-        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
-        "Test Layout for TestDataNodeRollingUpgrade");
+    addDataNodeLayoutVersion(
+        DataNodeLayoutVersion.getCurrentLayoutVersion() - 1);
     LOG.info("Restarting the DataNode");
     cluster.restartDataNode(dnprop, true);
     cluster.waitActive();

@@ -422,9 +422,8 @@
     // Restart the DN with a new layout version to trigger layout upgrade.
     LOG.info("Shutting down the Datanode");
     MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
-    DFSTestUtil.addDataNodeLayoutVersion(
-        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
-        "Test Layout for TestDataNodeRollingUpgrade");
+    addDataNodeLayoutVersion(
+        DataNodeLayoutVersion.getCurrentLayoutVersion() - 1);
     LOG.info("Restarting the DataNode");
     cluster.restartDataNode(dnprop, true);
     cluster.waitActive();

@@ -470,4 +469,18 @@
       shutdownCluster();
     }
   }
+
+  static void addDataNodeLayoutVersion(final int lv) {
+    assertTrue(lv < DataNodeLayoutVersion.getCurrentLayoutVersion());
+    DataNodeLayoutVersion.setCurrentLayoutVersionForTesting(lv);
+
+    // Inject the feature into the FEATURES map.
+    final LayoutVersion.FeatureInfo featureInfo =
+        new LayoutVersion.FeatureInfo(lv, lv + 1,
+            "Test Layout for TestDataNodeRollingUpgrade", false);
+
+    // Update the FEATURES map with the new layout version.
+    LayoutVersion.updateMap(DataNodeLayoutVersion.FEATURES,
+        new LayoutVersion.LayoutFeature[]{() -> featureInfo});
+  }
 }