HDFS-5754. Split LayoutVersion into NameNodeLayoutVersion and DataNodeLayoutVersion. Contributed by Brandon Li
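
This change separates the layout version of the NameNode from that of the DataNode: HdfsConstants.LAYOUT_VERSION is replaced by NAMENODE_LAYOUT_VERSION and DATANODE_LAYOUT_VERSION, LayoutVersion gains a common LayoutFeature interface and a FeatureInfo holder shared by both feature enums, a new DataNodeLayoutVersion class is added, and StorageInfo now carries the NodeType so that storage handling and protobuf conversions apply the correct service's layout version.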
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1563041 13f79535-47bb-0310-9956-ffa450edef68
parent efcdf81ca9
commit 00067895a0
@@ -15,3 +15,6 @@ HDFS-5535 subtasks:
     HDFS-5835. Add a new option for starting Namenode when rolling upgrade is
     in progress. (szetszwo)
+
+    HDFS-5754. Split LayoutVersion into NameNodeLayoutVersion and
+    DataNodeLayoutVersion. (Brandon Li via szetszwo)
 
@@ -397,7 +397,7 @@ public class BookKeeperJournalManager implements JournalManager {
     try {
       String znodePath = inprogressZNode(txId);
       EditLogLedgerMetadata l = new EditLogLedgerMetadata(znodePath,
-          HdfsConstants.LAYOUT_VERSION, currentLedger.getId(), txId);
+          HdfsConstants.NAMENODE_LAYOUT_VERSION, currentLedger.getId(), txId);
       /* Write the ledger metadata out to the inprogress ledger znode
        * This can fail if for some reason our write lock has
        * expired (@see WriteLock) and another process has managed to
@@ -68,7 +68,7 @@ public class TestBookKeeperEditLogStreams {
       lh.close();
 
       EditLogLedgerMetadata metadata = new EditLogLedgerMetadata("/foobar",
-          HdfsConstants.LAYOUT_VERSION, lh.getId(), 0x1234);
+          HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
       try {
         new BookKeeperEditLogInputStream(lh, metadata, -1);
         fail("Shouldn't get this far, should have thrown");
@@ -77,7 +77,7 @@ public class TestBookKeeperEditLogStreams {
       }
 
       metadata = new EditLogLedgerMetadata("/foobar",
-          HdfsConstants.LAYOUT_VERSION, lh.getId(), 0x1234);
+          HdfsConstants.NAMENODE_LAYOUT_VERSION, lh.getId(), 0x1234);
       try {
         new BookKeeperEditLogInputStream(lh, metadata, 0);
         fail("Shouldn't get this far, should have thrown");
@@ -25,6 +25,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 
 /************************************
  * Some handy constants
@@ -125,10 +129,18 @@ public class HdfsConstants {
 
 
   /**
-   * Please see {@link LayoutVersion} on adding new layout version.
+   * Current layout version for NameNode.
+   * Please see {@link NameNodeLayoutVersion.Feature} on adding new layout version.
    */
-  public static final int LAYOUT_VERSION = LayoutVersion
-      .getCurrentLayoutVersion();
+  public static final int NAMENODE_LAYOUT_VERSION
+      = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
+
+  /**
+   * Current layout version for DataNode.
+   * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version.
+   */
+  public static final int DATANODE_LAYOUT_VERSION
+      = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
 
   /**
    * A special path component contained in the path for a snapshot file/dir
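With the constant split above, call sites must pick the constant matching their service. A minimal sketch of that selection (the helper class and method names are hypothetical, not part of the patch; it mirrors the dispatch StorageInfo.getServiceLayoutVersion performs in a later hunk):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;

// Hypothetical helper: selects the service-specific layout version constant.
class ServiceLayoutVersions {
  static int forNodeType(NodeType type) {
    return type == NodeType.DATA_NODE
        ? HdfsConstants.DATANODE_LAYOUT_VERSION
        : HdfsConstants.NAMENODE_LAYOUT_VERSION;
  }
}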
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.util.EnumSet;
-import java.util.HashMap;
+import java.util.Comparator;
 import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
@@ -54,19 +55,27 @@ public class LayoutVersion {
   public static final int BUGFIX_HDFS_2991_VERSION = -40;
 
   /**
-   * Enums for features that change the layout version.
+   * The interface to be implemented by NameNode and DataNode layout features
+   */
+  public interface LayoutFeature {
+    public FeatureInfo getInfo();
+  }
+
+  /**
+   * Enums for features that change the layout version before rolling
+   * upgrade is supported.
    * <br><br>
    * To add a new layout version:
    * <ul>
   * <li>Define a new enum constant with a short enum name, the new layout version
    * and description of the added feature.</li>
    * <li>When adding a layout version with an ancestor that is not same as
-   * its immediate predecessor, use the constructor where a spacific ancestor
+   * its immediate predecessor, use the constructor where a specific ancestor
    * can be passed.
    * </li>
    * </ul>
    */
-  public static enum Feature {
+  public static enum Feature implements LayoutFeature {
     NAMESPACE_QUOTA(-16, "Support for namespace quotas"),
     FILE_ACCESS_TIME(-17, "Support for access time on files"),
     DISKSPACE_QUOTA(-18, "Support for disk space quotas"),
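The LayoutFeature interface added above is what lets the NameNode and DataNode keep independent feature enums while sharing the version-matrix machinery. A self-contained sketch of the pattern (FeatureInfo is reduced to two fields here for brevity; the real one also carries a description, a reserved flag, and special features):

// Each feature enum implements the shared interface and delegates its
// metadata to an immutable info object, so generic code needs only
// LayoutFeature, never a concrete enum type.
interface LayoutFeature {
  FeatureInfo getInfo();
}

final class FeatureInfo {
  private final int lv;          // layout version that introduces the feature
  private final int ancestorLV;  // layout version it was derived from

  FeatureInfo(int lv, int ancestorLV) {
    this.lv = lv;
    this.ancestorLV = ancestorLV;
  }
  int getLayoutVersion() { return lv; }
  int getAncestorLayoutVersion() { return ancestorLV; }
}

enum DemoFeature implements LayoutFeature {
  NAMESPACE_QUOTA(-16), FILE_ACCESS_TIME(-17);

  private final FeatureInfo info;

  DemoFeature(int lv) {
    // By default the ancestor is the immediately preceding version; layout
    // versions are negative and decrease over time, so that is lv + 1.
    this.info = new FeatureInfo(lv, lv + 1);
  }
  @Override public FeatureInfo getInfo() { return info; }
}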
@@ -112,14 +121,9 @@ public class LayoutVersion {
     ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
         + " Use distinct StorageUuid per storage directory."),
     ADD_LAYOUT_FLAGS(-50, "Add support for layout flags."),
-    CACHING(-51, "Support for cache pools and path-based caching"),
-    ROLLING_UPGRADE_MARKER(-52, "Upgrade marker for rolling upgrade");
+    CACHING(-51, "Support for cache pools and path-based caching");
 
-    final int lv;
-    final int ancestorLV;
-    final String description;
-    final boolean reserved;
-    final Feature[] specialFeatures;
+    private final FeatureInfo info;
 
     /**
      * Feature that is added at layout version {@code lv} - 1.
@@ -136,16 +140,35 @@ public class LayoutVersion {
      * @param ancestorLV layout version from which the new lv is derived from.
      * @param description description of the feature
      * @param reserved true when this is a layout version reserved for previous
-     * verions
+     * version
      * @param features set of features that are to be enabled for this version
      */
     Feature(final int lv, final int ancestorLV, final String description,
         boolean reserved, Feature... features) {
+      info = new FeatureInfo(lv, ancestorLV, description, reserved, features);
+    }
+
+    @Override
+    public FeatureInfo getInfo() {
+      return info;
+    }
+  }
+
+  /** Feature information. */
+  public static class FeatureInfo {
+    private final int lv;
+    private final int ancestorLV;
+    private final String description;
+    private final boolean reserved;
+    private final LayoutFeature[] specialFeatures;
+
+    public FeatureInfo(final int lv, final int ancestorLV, final String description,
+        boolean reserved, LayoutFeature... specialFeatures) {
       this.lv = lv;
       this.ancestorLV = ancestorLV;
       this.description = description;
       this.reserved = reserved;
-      specialFeatures = features;
+      this.specialFeatures = specialFeatures;
     }
 
     /**
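Note that the five per-constant fields the enum used to carry (lv, ancestorLV, description, reserved, specialFeatures) now live in the immutable FeatureInfo holder, so the NameNode and DataNode feature enums share one data representation behind a single accessor, getInfo(); as the next hunk shows, the map-building code then works purely through that interface.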
@@ -175,80 +198,88 @@ public class LayoutVersion {
     public boolean isReservedForOldRelease() {
       return reserved;
     }
+
+    public LayoutFeature[] getSpecialFeatures() {
+      return specialFeatures;
+    }
   }
 
-  // Build layout version and corresponding feature matrix
-  static final Map<Integer, EnumSet<Feature>>map =
-      new HashMap<Integer, EnumSet<Feature>>();
-
-  // Static initialization
-  static {
-    initMap();
+  static class LayoutFeatureComparator implements Comparator<LayoutFeature> {
+    @Override
+    public int compare(LayoutFeature arg0, LayoutFeature arg1) {
+      return arg0.getInfo().getLayoutVersion()
+          - arg1.getInfo().getLayoutVersion();
+    }
   }
 
-  /**
-   * Initialize the map of a layout version and EnumSet of {@link Feature}s
-   * supported.
-   */
-  private static void initMap() {
+  public static void updateMap(Map<Integer, SortedSet<LayoutFeature>> map,
+      LayoutFeature[] features) {
     // Go through all the enum constants and build a map of
-    // LayoutVersion <-> EnumSet of all supported features in that LayoutVersion
-    for (Feature f : Feature.values()) {
-      EnumSet<Feature> ancestorSet = map.get(f.ancestorLV);
+    // LayoutVersion <-> Set of all supported features in that LayoutVersion
+    for (LayoutFeature f : features) {
+      final FeatureInfo info = f.getInfo();
+      SortedSet<LayoutFeature> ancestorSet = map.get(info.getAncestorLayoutVersion());
       if (ancestorSet == null) {
-        ancestorSet = EnumSet.noneOf(Feature.class); // Empty enum set
-        map.put(f.ancestorLV, ancestorSet);
+        // Empty set
+        ancestorSet = new TreeSet<LayoutFeature>(new LayoutFeatureComparator());
+        map.put(info.getAncestorLayoutVersion(), ancestorSet);
       }
-      EnumSet<Feature> featureSet = EnumSet.copyOf(ancestorSet);
-      if (f.specialFeatures != null) {
-        for (Feature specialFeature : f.specialFeatures) {
+      SortedSet<LayoutFeature> featureSet = new TreeSet<LayoutFeature>(ancestorSet);
+      if (info.getSpecialFeatures() != null) {
+        for (LayoutFeature specialFeature : info.getSpecialFeatures()) {
           featureSet.add(specialFeature);
         }
       }
       featureSet.add(f);
-      map.put(f.lv, featureSet);
+      map.put(info.getLayoutVersion(), featureSet);
     }
   }
 
   /**
    * Gets formatted string that describes {@link LayoutVersion} information.
    */
-  public static String getString() {
+  public String getString(Map<Integer, SortedSet<LayoutFeature>> map,
+      LayoutFeature[] values) {
     final StringBuilder buf = new StringBuilder();
     buf.append("Feature List:\n");
-    for (Feature f : Feature.values()) {
+    for (LayoutFeature f : values) {
+      final FeatureInfo info = f.getInfo();
       buf.append(f).append(" introduced in layout version ")
-          .append(f.lv).append(" (").
-          append(f.description).append(")\n");
+          .append(info.getLayoutVersion()).append(" (")
+          .append(info.getDescription()).append(")\n");
     }
 
     buf.append("\n\nLayoutVersion and supported features:\n");
-    for (Feature f : Feature.values()) {
-      buf.append(f.lv).append(": ").append(map.get(f.lv))
-          .append("\n");
+    for (LayoutFeature f : values) {
+      final FeatureInfo info = f.getInfo();
+      buf.append(info.getLayoutVersion()).append(": ")
+          .append(map.get(info.getLayoutVersion())).append("\n");
     }
     return buf.toString();
   }
 
   /**
    * Returns true if a given feature is supported in the given layout version
+   * @param map layout feature map
    * @param f Feature
    * @param lv LayoutVersion
    * @return true if {@code f} is supported in layout version {@code lv}
    */
-  public static boolean supports(final Feature f, final int lv) {
-    final EnumSet<Feature> set = map.get(lv);
+  public static boolean supports(Map<Integer, SortedSet<LayoutFeature>> map,
+      final LayoutFeature f, final int lv) {
+    final SortedSet<LayoutFeature> set = map.get(lv);
     return set != null && set.contains(f);
   }
 
   /**
    * Get the current layout version
    */
-  public static int getCurrentLayoutVersion() {
-    Feature[] values = Feature.values();
+  public static int getCurrentLayoutVersion(
+      Map<Integer, SortedSet<LayoutFeature>> map, LayoutFeature[] values) {
     for (int i = values.length -1; i >= 0; i--) {
-      if (!values[i].isReservedForOldRelease()) {
-        return values[i].lv;
+      final FeatureInfo info = values[i].getInfo();
+      if (!info.isReservedForOldRelease()) {
+        return info.getLayoutVersion();
       }
     }
     throw new AssertionError("All layout versions are reserved.");
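An EnumSet-based matrix cannot hold two unrelated enum types, which is why updateMap switches to a TreeSet ordered by layout version. A runnable sketch of the same accumulation, reusing the LayoutFeature/FeatureInfo/DemoFeature types sketched earlier (specialFeatures omitted for brevity):

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

class FeatureMatrix {
  // Mirrors LayoutFeatureComparator: order features by layout version.
  static final Comparator<LayoutFeature> BY_LV = new Comparator<LayoutFeature>() {
    @Override
    public int compare(LayoutFeature a, LayoutFeature b) {
      return a.getInfo().getLayoutVersion() - b.getInfo().getLayoutVersion();
    }
  };

  // Each version's set starts as a copy of its ancestor's set, then gains
  // the feature itself -- the same accumulation updateMap performs.
  static Map<Integer, SortedSet<LayoutFeature>> build(LayoutFeature[] features) {
    Map<Integer, SortedSet<LayoutFeature>> map =
        new HashMap<Integer, SortedSet<LayoutFeature>>();
    for (LayoutFeature f : features) {
      int ancestorLV = f.getInfo().getAncestorLayoutVersion();
      SortedSet<LayoutFeature> ancestorSet = map.get(ancestorLV);
      if (ancestorSet == null) {
        ancestorSet = new TreeSet<LayoutFeature>(BY_LV);
        map.put(ancestorLV, ancestorSet);
      }
      // The TreeSet(SortedSet) constructor keeps the ancestor set's comparator.
      SortedSet<LayoutFeature> featureSet = new TreeSet<LayoutFeature>(ancestorSet);
      featureSet.add(f);
      map.put(f.getInfo().getLayoutVersion(), featureSet);
    }
    return map;
  }

  static boolean supports(Map<Integer, SortedSet<LayoutFeature>> map,
      LayoutFeature f, int lv) {
    SortedSet<LayoutFeature> set = map.get(lv);
    return set != null && set.contains(f);
  }
}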
@@ -138,6 +138,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -232,9 +233,9 @@ public class PBHelper {
         .setNamespceID(info.getNamespaceID()).build();
   }
 
-  public static StorageInfo convert(StorageInfoProto info) {
+  public static StorageInfo convert(StorageInfoProto info, NodeType type) {
     return new StorageInfo(info.getLayoutVersion(), info.getNamespceID(),
-        info.getClusterID(), info.getCTime());
+        info.getClusterID(), info.getCTime(), type);
   }
 
   public static NamenodeRegistrationProto convert(NamenodeRegistration reg) {
@@ -245,8 +246,9 @@ public class PBHelper {
   }
 
   public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
+    StorageInfo si = convert(reg.getStorageInfo(), NodeType.NAME_NODE);
     return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(),
-        convert(reg.getStorageInfo()), convert(reg.getRole()));
+        si, convert(reg.getRole()));
   }
 
   // DatanodeId
@@ -378,9 +380,9 @@ public class PBHelper {
   }
 
   public static CheckpointSignature convert(CheckpointSignatureProto s) {
-    return new CheckpointSignature(PBHelper.convert(s.getStorageInfo()),
-        s.getBlockPoolId(), s.getMostRecentCheckpointTxId(),
-        s.getCurSegmentTxId());
+    StorageInfo si = PBHelper.convert(s.getStorageInfo(), NodeType.NAME_NODE);
+    return new CheckpointSignature(si, s.getBlockPoolId(),
+        s.getMostRecentCheckpointTxId(), s.getCurSegmentTxId());
   }
 
   public static RemoteEditLogProto convert(RemoteEditLog log) {
@@ -733,9 +735,9 @@ public class PBHelper {
   }
 
   public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
+    StorageInfo si = convert(proto.getStorageInfo(), NodeType.DATA_NODE);
     return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
-        PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
-            .getKeys()), proto.getSoftwareVersion());
+        si, PBHelper.convert(proto.getKeys()), proto.getSoftwareVersion());
   }
 
   public static DatanodeCommand convert(DatanodeCommandProto proto) {
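The extra NodeType argument in these PBHelper conversions exists because StorageInfoProto itself does not record which service produced it; with storageType now a final field of StorageInfo, every deserialization site has to state whether the bytes describe a NameNode (registration, checkpoint signature, journal upgrade) or a DataNode (datanode registration).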
@@ -64,6 +64,8 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogs
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 
 import com.google.protobuf.RpcController;
@@ -274,9 +276,9 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
   @Override
   public DoUpgradeResponseProto doUpgrade(RpcController controller,
       DoUpgradeRequestProto request) throws ServiceException {
+    StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.NAME_NODE);
     try {
-      impl.doUpgrade(convert(request.getJid()),
-          PBHelper.convert(request.getSInfo()));
+      impl.doUpgrade(convert(request.getJid()), si);
       return DoUpgradeResponseProto.getDefaultInstance();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -298,9 +300,9 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
   public CanRollBackResponseProto canRollBack(RpcController controller,
       CanRollBackRequestProto request) throws ServiceException {
     try {
-      Boolean result = impl.canRollBack(convert(request.getJid()),
-          PBHelper.convert(request.getStorage()),
-          PBHelper.convert(request.getPrevStorage()),
+      StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.NAME_NODE);
+      Boolean result = impl.canRollBack(convert(request.getJid()), si,
+          PBHelper.convert(request.getPrevStorage(), NodeType.NAME_NODE),
           request.getTargetLayoutVersion());
       return CanRollBackResponseProto.newBuilder()
           .setCanRollBack(result)
@@ -45,8 +45,9 @@ public class IncorrectVersionException extends IOException {
         minimumVersion + "'");
   }
 
-  public IncorrectVersionException(int versionReported, String ofWhat) {
-    this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION);
+  public IncorrectVersionException(int currentLayoutVersion,
+      int versionReported, String ofWhat) {
+    this(versionReported, ofWhat, currentLayoutVersion);
   }
 
   public IncorrectVersionException(int versionReported,
@@ -34,7 +34,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.util.ToolRunner;
@@ -116,7 +115,6 @@ public abstract class Storage extends StorageInfo {
     public boolean isOfType(StorageDirType type);
   }
 
-  protected NodeType storageType; // Type of the node using this storage
   protected List<StorageDirectory> storageDirs = new ArrayList<StorageDirectory>();
 
   private class DirIterator implements Iterator<StorageDirectory> {
@@ -781,13 +779,11 @@ public abstract class Storage extends StorageInfo {
    * Create empty storage info of the specified type
    */
   protected Storage(NodeType type) {
-    super();
-    this.storageType = type;
+    super(type);
   }
 
-  protected Storage(NodeType type, StorageInfo storageInfo) {
+  protected Storage(StorageInfo storageInfo) {
     super(storageInfo);
-    this.storageType = type;
   }
 
   public int getNumStorageDirs() {
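With storageType now held (and validated) by StorageInfo, the Storage constructors shrink to plain super-calls; the node type is fixed at construction time instead of being patched onto the object afterwards.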
@@ -931,36 +927,12 @@ public abstract class Storage extends StorageInfo {
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
     // Set clusterID in version with federation support
-    if (versionSupportsFederation()) {
+    if (versionSupportsFederation(getServiceLayoutFeatureMap())) {
       props.setProperty("clusterID", clusterID);
     }
     props.setProperty("cTime", String.valueOf(cTime));
   }
 
-  /**
-   * Get common storage fields.
-   * Should be overloaded if additional fields need to be get.
-   *
-   * @param props
-   * @throws IOException
-   */
-  protected void setFieldsFromProperties(
-      Properties props, StorageDirectory sd) throws IOException {
-    super.setFieldsFromProperties(props, sd);
-    setStorageType(props, sd);
-  }
-
-  /** Validate and set storage type from {@link Properties}*/
-  protected void setStorageType(Properties props, StorageDirectory sd)
-      throws InconsistentFSStateException {
-    NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
-    if (!storageType.equals(type)) {
-      throw new InconsistentFSStateException(sd.root,
-          "node type is incompatible with others.");
-    }
-    storageType = type;
-  }
-
   /**
    * Write properties to the VERSION file in the given storage directory.
    */
@@ -1025,7 +997,7 @@ public abstract class Storage extends StorageInfo {
    * @throws IOException
    */
   public void writeAll() throws IOException {
-    this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+    this.layoutVersion = getServiceLayoutVersion();
     for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
       writeProperties(it.next());
     }
@@ -21,15 +21,22 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.Map;
 import java.util.Properties;
+import java.util.SortedSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
 
 /**
  * Common class for storage information.
@@ -43,21 +50,25 @@ public class StorageInfo {
   public String clusterID; // id of the cluster
   public long cTime; // creation time of the file system state
 
+  protected final NodeType storageType; // Type of the node using this storage
+
   protected static final String STORAGE_FILE_VERSION = "VERSION";
 
-  public StorageInfo () {
-    this(0, 0, "", 0L);
+  public StorageInfo(NodeType type) {
+    this(0, 0, "", 0L, type);
   }
 
-  public StorageInfo(int layoutV, int nsID, String cid, long cT) {
+  public StorageInfo(int layoutV, int nsID, String cid, long cT, NodeType type) {
     layoutVersion = layoutV;
     clusterID = cid;
     namespaceID = nsID;
     cTime = cT;
+    storageType = type;
   }
 
   public StorageInfo(StorageInfo from) {
-    setStorageInfo(from);
+    this(from.layoutVersion, from.namespaceID, from.clusterID, from.cTime,
+        from.storageType);
   }
 
   /**
@@ -84,14 +95,17 @@ public class StorageInfo {
   public long getCTime() { return cTime; }
 
   public void setStorageInfo(StorageInfo from) {
+    Preconditions.checkArgument(from.storageType == storageType);
     layoutVersion = from.layoutVersion;
     clusterID = from.clusterID;
     namespaceID = from.namespaceID;
     cTime = from.cTime;
   }
 
-  public boolean versionSupportsFederation() {
-    return LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
+  public boolean versionSupportsFederation(
+      Map<Integer, SortedSet<LayoutFeature>> map) {
+    return LayoutVersion.supports(map, LayoutVersion.Feature.FEDERATION,
+        layoutVersion);
   }
 
   @Override
|
||||||
setNamespaceID(props, sd);
|
setNamespaceID(props, sd);
|
||||||
setcTime(props, sd);
|
setcTime(props, sd);
|
||||||
setClusterId(props, layoutVersion, sd);
|
setClusterId(props, layoutVersion, sd);
|
||||||
|
checkStorageType(props, sd);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Validate and set storage type from {@link Properties}*/
|
||||||
|
protected void checkStorageType(Properties props, StorageDirectory sd)
|
||||||
|
throws InconsistentFSStateException {
|
||||||
|
NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
|
||||||
|
if (!storageType.equals(type)) {
|
||||||
|
throw new InconsistentFSStateException(sd.root,
|
||||||
|
"node type is incompatible with others.");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Validate and set ctime from {@link Properties}*/
|
/** Validate and set ctime from {@link Properties}*/
|
||||||
|
@ -157,7 +182,8 @@ public class StorageInfo {
|
||||||
protected void setClusterId(Properties props, int layoutVersion,
|
protected void setClusterId(Properties props, int layoutVersion,
|
||||||
StorageDirectory sd) throws InconsistentFSStateException {
|
StorageDirectory sd) throws InconsistentFSStateException {
|
||||||
// Set cluster ID in version that supports federation
|
// Set cluster ID in version that supports federation
|
||||||
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
if (LayoutVersion.supports(getServiceLayoutFeatureMap(),
|
||||||
|
Feature.FEDERATION, layoutVersion)) {
|
||||||
String cid = getProperty(props, sd, "clusterID");
|
String cid = getProperty(props, sd, "clusterID");
|
||||||
if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
|
if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
|
||||||
throw new InconsistentFSStateException(sd.getRoot(),
|
throw new InconsistentFSStateException(sd.getRoot(),
|
||||||
|
@@ -171,9 +197,9 @@ public class StorageInfo {
   protected void setLayoutVersion(Properties props, StorageDirectory sd)
       throws IncorrectVersionException, InconsistentFSStateException {
     int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
-    if (lv < HdfsConstants.LAYOUT_VERSION) { // future version
-      throw new IncorrectVersionException(lv, "storage directory "
-          + sd.root.getAbsolutePath());
+    if (lv < getServiceLayoutVersion()) { // future version
+      throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
+          "storage directory " + sd.root.getAbsolutePath());
     }
     layoutVersion = lv;
   }
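HDFS layout versions are negative integers that decrease as the layout evolves, so `lv < getServiceLayoutVersion()` reads as "the on-disk layout is newer than this software supports", which is why it is rejected as a future version.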
@@ -189,6 +215,16 @@ public class StorageInfo {
     namespaceID = nsId;
   }
 
+  public int getServiceLayoutVersion() {
+    return storageType == NodeType.DATA_NODE ? HdfsConstants.DATANODE_LAYOUT_VERSION
+        : HdfsConstants.NAMENODE_LAYOUT_VERSION;
+  }
+
+  public Map<Integer, SortedSet<LayoutFeature>> getServiceLayoutFeatureMap() {
+    return storageType == NodeType.DATA_NODE ? DataNodeLayoutVersion.FEATURES
+        : NameNodeLayoutVersion.FEATURES;
+  }
+
   static String getProperty(Properties props, StorageDirectory sd,
       String name) throws InconsistentFSStateException {
     String property = props.getProperty(name);
@@ -204,12 +204,10 @@ class BPServiceActor implements Runnable {
           "limits. Note: This is normal during a rolling upgrade.");
     }
 
-    if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
-      LOG.warn("DataNode and NameNode layout versions must be the same." +
-          " Expected: " + HdfsConstants.LAYOUT_VERSION +
-          " actual " + nsInfo.getLayoutVersion());
-      throw new IncorrectVersionException(
-          nsInfo.getLayoutVersion(), "namenode");
+    if (HdfsConstants.DATANODE_LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
+      LOG.info("DataNode and NameNode layout versions are different:" +
+          " DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION +
+          " NameNode version: " + nsInfo.getLayoutVersion());
     }
   }
 
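This hunk is the behavioral core of the change for rolling upgrade: a DataNode no longer refuses to register when its layout version differs from the NameNode's; the mismatch is simply logged, since the two services now version their layouts independently.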
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -65,7 +64,7 @@ public class BlockPoolSliceStorage extends Storage {
   private String blockpoolID = ""; // id of the blockpool
 
   public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) {
-    super(NodeType.DATA_NODE, storageInfo);
+    super(storageInfo);
     blockpoolID = bpid;
   }
 
@@ -93,9 +92,6 @@ public class BlockPoolSliceStorage extends Storage {
    */
   void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
       Collection<File> dataDirs, StartupOption startOpt) throws IOException {
-    assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()
-        : "Block-pool and name-node layout versions must be the same.";
-
     // 1. For each BP data directory analyze the state and
     // check whether all is consistent before transitioning.
     this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
@@ -143,8 +139,6 @@ public class BlockPoolSliceStorage extends Storage {
     // while others could be up-to-date for the regular startup.
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
       doTransition(getStorageDir(idx), nsInfo, startOpt);
-      assert getLayoutVersion() == nsInfo.getLayoutVersion()
-          : "Data-node and name-node layout versions must be the same.";
       assert getCTime() == nsInfo.getCTime()
           : "Data-node and name-node CTimes must be the same.";
     }
@@ -175,11 +169,10 @@ public class BlockPoolSliceStorage extends Storage {
     LOG.info("Formatting block pool " + blockpoolID + " directory "
         + bpSdir.getCurrentDir());
     bpSdir.clearDirectory(); // create directory
-    this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+    this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     this.cTime = nsInfo.getCTime();
     this.namespaceID = nsInfo.getNamespaceID();
     this.blockpoolID = nsInfo.getBlockPoolID();
-    this.storageType = NodeType.DATA_NODE;
     writeProperties(bpSdir);
   }
 
@@ -243,7 +236,7 @@ public class BlockPoolSliceStorage extends Storage {
 
     readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
-    assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION
+    assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION
         : "Future version is not allowed";
     if (getNamespaceID() != nsInfo.getNamespaceID()) {
       throw new IOException("Incompatible namespaceIDs in "
@@ -257,11 +250,11 @@ public class BlockPoolSliceStorage extends Storage {
         + nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
         + blockpoolID);
     }
-    if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION
+    if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION
         && this.cTime == nsInfo.getCTime()) {
       return; // regular startup
     }
-    if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION
+    if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
         || this.cTime < nsInfo.getCTime()) {
       doUpgrade(sd, nsInfo); // upgrade
       return;
@@ -294,7 +287,8 @@ public class BlockPoolSliceStorage extends Storage {
    */
   void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
     // Upgrading is applicable only to release with federation or after
-    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (!DataNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
       return;
     }
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
@@ -327,7 +321,7 @@ public class BlockPoolSliceStorage extends Storage {
 
     // 3. Create new <SD>/current with block files hardlinks and VERSION
     linkAllBlocks(bpTmpDir, bpCurDir);
-    this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+    this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     assert this.namespaceID == nsInfo.getNamespaceID()
         : "Data-node and name-node layout versions must be the same.";
     this.cTime = nsInfo.getCTime();
@@ -349,7 +343,8 @@ public class BlockPoolSliceStorage extends Storage {
    * @throws IOException if the directory is not empty or it can not be removed
    */
   private void cleanupDetachDir(File detachDir) throws IOException {
-    if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion)
+    if (!DataNodeLayoutVersion.supports(
+        LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
         && detachDir.exists() && detachDir.isDirectory()) {
 
       if (FileUtil.list(detachDir).length != 0) {
@@ -389,13 +384,13 @@ public class BlockPoolSliceStorage extends Storage {
     // the namespace state or can be further upgraded to it.
     // In another word, we can only roll back when ( storedLV >= software LV)
     // && ( DN.previousCTime <= NN.ctime)
-    if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION &&
+    if (!(prevInfo.getLayoutVersion() >= HdfsConstants.DATANODE_LAYOUT_VERSION &&
         prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
       throw new InconsistentFSStateException(bpSd.getRoot(),
           "Cannot rollback to a newer state.\nDatanode previous state: LV = "
               + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
               + " is newer than the namespace state: LV = "
-              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+              + HdfsConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime());
     }
 
     LOG.info("Rolling back storage directory " + bpSd.getRoot()
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.SortedSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
+
+@InterfaceAudience.Private
+public class DataNodeLayoutVersion {
+  /** Build layout version and corresponding feature matrix */
+  public final static Map<Integer, SortedSet<LayoutFeature>> FEATURES =
+      new HashMap<Integer, SortedSet<LayoutFeature>>();
+
+  public static final int CURRENT_LAYOUT_VERSION
+      = LayoutVersion.getCurrentLayoutVersion(FEATURES, Feature.values());
+
+  static {
+    LayoutVersion.updateMap(FEATURES, LayoutVersion.Feature.values());
+    LayoutVersion.updateMap(FEATURES, DataNodeLayoutVersion.Feature.values());
+  }
+
+  public static SortedSet<LayoutFeature> getFeatures(int lv) {
+    return FEATURES.get(lv);
+  }
+
+  public static boolean supports(final LayoutFeature f, final int lv) {
+    return LayoutVersion.supports(FEATURES, f, lv);
+  }
+
+  /**
+   * Enums for features that change the layout version.
+   * <br><br>
+   * To add a new layout version:
+   * <ul>
+   * <li>Define a new enum constant with a short enum name, the new layout version
+   * and description of the added feature.</li>
+   * <li>When adding a layout version with an ancestor that is not same as
+   * its immediate predecessor, use the constructor where a specific ancestor
+   * can be passed.
+   * </li>
+   * </ul>
+   */
+  public static enum Feature implements LayoutFeature {
+    FIRST_LAYOUT(-52, "First datanode layout");
+
+    private final FeatureInfo info;
+
+    /**
+     * DataNode feature that is added at layout version {@code lv} - 1.
+     * @param lv new layout version with the addition of this feature
+     * @param description description of the feature
+     */
+    Feature(final int lv, final String description) {
+      this(lv, lv + 1, description, false);
+    }
+
+    /**
+     * DataNode feature that is added at layout version {@code ancestorLV}.
+     * @param lv new layout version with the addition of this feature
+     * @param ancestorLV layout version from which the new lv is derived from.
+     * @param description description of the feature
+     * @param reserved true when this is a layout version reserved for previous
+     *        version
+     * @param features set of features that are to be enabled for this version
+     */
+    Feature(final int lv, final int ancestorLV, final String description,
+        boolean reserved, Feature... features) {
+      info = new FeatureInfo(lv, ancestorLV, description, reserved, features);
+    }
+
+    @Override
+    public FeatureInfo getInfo() {
+      return info;
+    }
+  }
+}
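A short usage sketch of the new class, based only on the API in the hunk above (the demo class and its output line are illustrative, not part of the patch):

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;

public class DataNodeLayoutDemo {
  public static void main(String[] args) {
    int lv = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
    // Pre-split features remain queryable because the static initializer
    // folds LayoutVersion.Feature.values() into the DataNode feature map.
    boolean federation = DataNodeLayoutVersion.supports(
        LayoutVersion.Feature.FEDERATION, lv);
    System.out.println("DataNode layout " + lv
        + " supports FEDERATION: " + federation);
  }
}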
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -91,7 +90,7 @@ public class DataStorage extends Storage {
   }
 
   public DataStorage(StorageInfo storageInfo) {
-    super(NodeType.DATA_NODE, storageInfo);
+    super(storageInfo);
   }
 
   public synchronized String getDatanodeUuid() {
@@ -131,10 +130,9 @@ public class DataStorage extends Storage {
       // DN storage has been initialized, no need to do anything
       return;
     }
-    assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
-      "Data-node version " + HdfsConstants.LAYOUT_VERSION +
-      " and name-node layout version " + nsInfo.getLayoutVersion() +
-      " must be the same.";
+    if (HdfsConstants.DATANODE_LAYOUT_VERSION == nsInfo.getLayoutVersion())
+      LOG.info("Data-node version: " + HdfsConstants.DATANODE_LAYOUT_VERSION +
+          " and name-node layout version: " + nsInfo.getLayoutVersion());
 
     // 1. For each data directory calculate its state and
     // check whether all is consistent before transitioning.
@@ -261,7 +259,7 @@ public class DataStorage extends Storage {
   void format(StorageDirectory sd, NamespaceInfo nsInfo,
       String datanodeUuid) throws IOException {
     sd.clearDirectory(); // create directory
-    this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+    this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     this.clusterID = nsInfo.getClusterID();
     this.namespaceID = nsInfo.getNamespaceID();
     this.cTime = 0;
@@ -297,7 +295,8 @@ public class DataStorage extends Storage {
     }
 
     // Set NamespaceID in version before federation
-    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (!DataNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
       props.setProperty("namespaceID", String.valueOf(namespaceID));
     }
   }
@@ -321,11 +320,12 @@ public class DataStorage extends Storage {
       setLayoutVersion(props, sd);
     }
     setcTime(props, sd);
-    setStorageType(props, sd);
+    checkStorageType(props, sd);
     setClusterId(props, layoutVersion, sd);
 
     // Read NamespaceID in version before federation
-    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (!DataNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
       setNamespaceID(props, sd);
     }
 
@@ -414,11 +414,12 @@ public class DataStorage extends Storage {
     }
     readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
-    assert this.layoutVersion >= HdfsConstants.LAYOUT_VERSION :
+    assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION :
       "Future version is not allowed";
 
     boolean federationSupported =
-      LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
+      DataNodeLayoutVersion.supports(
+          LayoutVersion.Feature.FEDERATION, layoutVersion);
     // For pre-federation version - validate the namespaceID
     if (!federationSupported &&
         getNamespaceID() != nsInfo.getNamespaceID()) {
@ -440,11 +441,11 @@ public class DataStorage extends Storage {
|
||||||
// meaningful at BlockPoolSliceStorage level.
|
// meaningful at BlockPoolSliceStorage level.
|
||||||
|
|
||||||
// regular start up.
|
// regular start up.
|
||||||
if (this.layoutVersion == HdfsConstants.LAYOUT_VERSION)
|
if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION)
|
||||||
return; // regular startup
|
return; // regular startup
|
||||||
|
|
||||||
// do upgrade
|
// do upgrade
|
||||||
if (this.layoutVersion > HdfsConstants.LAYOUT_VERSION) {
|
if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
|
||||||
doUpgrade(sd, nsInfo); // upgrade
|
doUpgrade(sd, nsInfo); // upgrade
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -455,7 +456,7 @@ public class DataStorage extends Storage {
|
||||||
// failed.
|
// failed.
|
||||||
throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
|
throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
|
||||||
+ " is newer than the supported LV = "
|
+ " is newer than the supported LV = "
|
||||||
+ HdfsConstants.LAYOUT_VERSION
|
+ HdfsConstants.DATANODE_LAYOUT_VERSION
|
||||||
+ " or name node LV = "
|
+ " or name node LV = "
|
||||||
+ nsInfo.getLayoutVersion());
|
+ nsInfo.getLayoutVersion());
|
||||||
}
|
}
|
||||||
|
@ -485,7 +486,8 @@ public class DataStorage extends Storage {
|
||||||
void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
|
void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
|
||||||
// If the existing on-disk layout version supportes federation, simply
|
// If the existing on-disk layout version supportes federation, simply
|
||||||
// update its layout version.
|
// update its layout version.
|
||||||
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
if (DataNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
|
||||||
// The VERSION file is already read in. Override the layoutVersion
|
// The VERSION file is already read in. Override the layoutVersion
|
||||||
// field and overwrite the file.
|
// field and overwrite the file.
|
||||||
LOG.info("Updating layout version from " + layoutVersion + " to "
|
LOG.info("Updating layout version from " + layoutVersion + " to "
|
||||||
|
@ -528,7 +530,7 @@ public class DataStorage extends Storage {
|
||||||
linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
|
linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
|
||||||
|
|
||||||
// 4. Write version file under <SD>/current
|
// 4. Write version file under <SD>/current
|
||||||
layoutVersion = HdfsConstants.LAYOUT_VERSION;
|
layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
|
||||||
clusterID = nsInfo.getClusterID();
|
clusterID = nsInfo.getClusterID();
|
||||||
writeProperties(sd);
|
writeProperties(sd);
|
||||||
|
|
||||||
|
@ -548,7 +550,8 @@ public class DataStorage extends Storage {
|
||||||
* @throws IOException if the directory is not empty or it can not be removed
|
* @throws IOException if the directory is not empty or it can not be removed
|
||||||
*/
|
*/
|
||||||
private void cleanupDetachDir(File detachDir) throws IOException {
|
private void cleanupDetachDir(File detachDir) throws IOException {
|
||||||
if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion) &&
|
if (!DataNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion) &&
|
||||||
detachDir.exists() && detachDir.isDirectory() ) {
|
detachDir.exists() && detachDir.isDirectory() ) {
|
||||||
|
|
||||||
if (FileUtil.list(detachDir).length != 0 ) {
|
if (FileUtil.list(detachDir).length != 0 ) {
|
||||||
|
@ -590,9 +593,10 @@ public class DataStorage extends Storage {
|
||||||
// running a wrong version. But this will be detected in block pool
|
// running a wrong version. But this will be detected in block pool
|
||||||
// level and the invalid VERSION content will be overwritten when
|
// level and the invalid VERSION content will be overwritten when
|
||||||
// the error is corrected and rollback is retried.
|
// the error is corrected and rollback is retried.
|
||||||
if (LayoutVersion.supports(Feature.FEDERATION,
|
if (DataNodeLayoutVersion.supports(
|
||||||
HdfsConstants.LAYOUT_VERSION) &&
|
LayoutVersion.Feature.FEDERATION,
|
||||||
HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion()) {
|
HdfsConstants.DATANODE_LAYOUT_VERSION) &&
|
||||||
|
HdfsConstants.DATANODE_LAYOUT_VERSION == nsInfo.getLayoutVersion()) {
|
||||||
readProperties(sd, nsInfo.getLayoutVersion());
|
readProperties(sd, nsInfo.getLayoutVersion());
|
||||||
writeProperties(sd);
|
writeProperties(sd);
|
||||||
LOG.info("Layout version rolled back to " +
|
LOG.info("Layout version rolled back to " +
|
||||||
|
@ -605,7 +609,7 @@ public class DataStorage extends Storage {
|
||||||
|
|
||||||
// We allow rollback to a state, which is either consistent with
|
// We allow rollback to a state, which is either consistent with
|
||||||
// the namespace state or can be further upgraded to it.
|
// the namespace state or can be further upgraded to it.
|
||||||
if (!(prevInfo.getLayoutVersion() >= HdfsConstants.LAYOUT_VERSION
|
if (!(prevInfo.getLayoutVersion() >= HdfsConstants.DATANODE_LAYOUT_VERSION
|
||||||
&& prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
|
&& prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
|
||||||
throw new InconsistentFSStateException(sd.getRoot(),
|
throw new InconsistentFSStateException(sd.getRoot(),
|
||||||
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
|
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
|
||||||
|
@ -710,7 +714,8 @@ public class DataStorage extends Storage {
|
||||||
HardLink hardLink = new HardLink();
|
HardLink hardLink = new HardLink();
|
||||||
// do the link
|
// do the link
|
||||||
int diskLayoutVersion = this.getLayoutVersion();
|
int diskLayoutVersion = this.getLayoutVersion();
|
||||||
if (LayoutVersion.supports(Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
|
if (DataNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
|
||||||
// hardlink finalized blocks in tmpDir/finalized
|
// hardlink finalized blocks in tmpDir/finalized
|
||||||
linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
|
linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
|
||||||
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
|
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
|
||||||
|
|
|
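Every DataStorage hunk above is the same mechanical rewrite: a feature check moves from the shared LayoutVersion.supports(Feature.X, lv) to DataNodeLayoutVersion.supports(LayoutVersion.Feature.X, lv), and every comparison against HdfsConstants.LAYOUT_VERSION now uses HdfsConstants.DATANODE_LAYOUT_VERSION. A minimal before/after sketch of the call pattern, using only names that appear in the hunks above:

    // before: one layout version shared by namenode and datanode
    if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
      props.setProperty("namespaceID", String.valueOf(namespaceID));
    }

    // after: the datanode consults its own registry; note the feature
    // constants themselves still live in LayoutVersion.Feature
    if (!DataNodeLayoutVersion.supports(
        LayoutVersion.Feature.FEDERATION, layoutVersion)) {
      props.setProperty("namespaceID", String.valueOf(namespaceID));
    }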
@@ -413,9 +413,9 @@ public class BackupNode extends NameNode {
 LOG.fatal(errorMsg);
 throw new IOException(errorMsg);
 }
-assert HdfsConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
+assert HdfsConstants.NAMENODE_LAYOUT_VERSION == nsInfo.getLayoutVersion() :
 "Active and backup node layout versions must be the same. Expected: "
-+ HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
++ HdfsConstants.NAMENODE_LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
 return nsInfo;
 }

@@ -21,6 +21,7 @@ import java.io.IOException;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;

 import com.google.common.collect.ComparisonChain;
@@ -47,6 +48,7 @@ public class CheckpointSignature extends StorageInfo
 }

 CheckpointSignature(String str) {
+super(NodeType.NAME_NODE);
 String[] fields = str.split(FIELD_SEPARATOR);
 assert fields.length == NUM_FIELDS :
 "Must be " + NUM_FIELDS + " fields in CheckpointSignature";
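CheckpointSignature now tags its StorageInfo superclass with the node type it describes via super(NodeType.NAME_NODE). A hedged sketch of what the tag enables once two layout versions exist; the helper below is illustrative only and is not part of this change:

    // illustrative: with a NodeType on StorageInfo, shared storage code
    // can select the matching current layout version for comparisons
    static int currentLayoutVersion(NodeType type) {
      return type == NodeType.NAME_NODE
          ? HdfsConstants.NAMENODE_LAYOUT_VERSION
          : HdfsConstants.DATANODE_LAYOUT_VERSION;
    }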
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
@@ -149,7 +148,8 @@ public class EditLogFileInputStream extends EditLogInputStream {
 } catch (EOFException eofe) {
 throw new LogHeaderCorruptException("No header found in log");
 }
-if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_LAYOUT_FLAGS, logVersion)) {
 try {
 LayoutFlags.read(dataIn);
 } catch (EOFException eofe) {
@@ -328,12 +328,12 @@ public class EditLogFileInputStream extends EditLogInputStream {
 throw new LogHeaderCorruptException(
 "Reached EOF when reading log header");
 }
-if (logVersion < HdfsConstants.LAYOUT_VERSION || // future version
+if (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
 logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION) { // unsupported
 throw new LogHeaderCorruptException(
 "Unexpected version of the file system log file: "
 + logVersion + ". Current version = "
-+ HdfsConstants.LAYOUT_VERSION + ".");
++ HdfsConstants.NAMENODE_LAYOUT_VERSION + ".");
 }
 return logVersion;
 }
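The header check above relies on HDFS layout versions being negative integers that decrease as the format evolves, so logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION means "written by a newer namenode". A small worked example with made-up version numbers:

    int current = -50;       // stand-in for HdfsConstants.NAMENODE_LAYOUT_VERSION
    int logVersion = -53;    // header value read from the edit log file
    boolean futureVersion = logVersion < current;  // true: -53 is newer, reject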
@@ -132,7 +132,7 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
 */
 @VisibleForTesting
 public static void writeHeader(DataOutputStream out) throws IOException {
-out.writeInt(HdfsConstants.LAYOUT_VERSION);
+out.writeInt(HdfsConstants.NAMENODE_LAYOUT_VERSION);
 LayoutFlags.write(out);
 }

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -285,7 +284,8 @@ public class FSEditLogLoader {
 long inodeId = inodeIdFromOp;

 if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
-if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
 throw new IOException("The layout version " + logVersion
 + " supports inodeId but gave bogus inodeId");
 }
@@ -922,7 +922,7 @@ public class FSEditLogLoader {
 // The editlog must be emptied by restarting the namenode, before proceeding
 // with the upgrade.
 if (Storage.is203LayoutVersion(logVersion)
-&& logVersion != HdfsConstants.LAYOUT_VERSION) {
+&& logVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) {
 String msg = "During upgrade failed to load the editlog version "
 + logVersion + " from release 0.20.203. Please go back to the old "
 + " release and restart the namenode. This empties the editlog "
@@ -87,7 +87,6 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@@ -248,8 +247,8 @@ public abstract class FSEditLogOp {

 void readRpcIds(DataInputStream in, int logVersion)
 throws IOException {
-if (LayoutVersion.supports(Feature.EDITLOG_SUPPORT_RETRYCACHE,
-logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_SUPPORT_RETRYCACHE, logVersion)) {
 this.rpcClientId = FSImageSerialization.readBytes(in);
 this.rpcCallId = FSImageSerialization.readInt(in);
 }
@@ -385,18 +384,20 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 }
-if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
 this.inodeId = in.readLong();
 } else {
 // The inodeId should be updated when this editLogOp is applied
 this.inodeId = INodeId.GRANDFATHER_INODE_ID;
 }
 if ((-17 < logVersion && length != 4) ||
-(logVersion <= -17 && length != 5 && !LayoutVersion.supports(
-Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
+(logVersion <= -17 && length != 5 && !NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
 throw new IOException("Incorrect data format." +
 " logVersion is " + logVersion +
 " but writables.length is " +
@@ -404,7 +405,8 @@ public abstract class FSEditLogOp {
 }
 this.path = FSImageSerialization.readString(in);

-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.replication = FSImageSerialization.readShort(in);
 this.mtime = FSImageSerialization.readLong(in);
 } else {
@@ -412,8 +414,10 @@ public abstract class FSEditLogOp {
 this.mtime = readLong(in);
 }

-if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.atime = FSImageSerialization.readLong(in);
 } else {
 this.atime = readLong(in);
@@ -422,7 +426,8 @@ public abstract class FSEditLogOp {
 this.atime = 0;
 }

-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.blockSize = FSImageSerialization.readLong(in);
 } else {
 this.blockSize = readLong(in);
@@ -831,7 +836,8 @@ public abstract class FSEditLogOp {
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
 this.path = FSImageSerialization.readString(in);
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.replication = FSImageSerialization.readShort(in);
 } else {
 this.replication = readShort(in);
@@ -922,7 +928,8 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 if (length < 3) { // trg, srcs.., timestamp
 throw new IOException("Incorrect data format " +
@@ -931,7 +938,8 @@ public abstract class FSEditLogOp {
 }
 this.trg = FSImageSerialization.readString(in);
 int srcSize = 0;
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 srcSize = in.readInt();
 } else {
 srcSize = this.length - 1 - 1; // trg and timestamp
@@ -950,7 +958,8 @@ public abstract class FSEditLogOp {
 srcs[i]= FSImageSerialization.readString(in);
 }

-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.timestamp = FSImageSerialization.readLong(in);
 } else {
 this.timestamp = readLong(in);
@@ -1056,7 +1065,8 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 if (this.length != 3) {
 throw new IOException("Incorrect data format. "
@@ -1065,7 +1075,8 @@ public abstract class FSEditLogOp {
 }
 this.src = FSImageSerialization.readString(in);
 this.dst = FSImageSerialization.readString(in);
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.timestamp = FSImageSerialization.readLong(in);
 } else {
 this.timestamp = readLong(in);
@@ -1152,14 +1163,16 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 if (this.length != 2) {
 throw new IOException("Incorrect data format. " + "delete operation.");
 }
 }
 this.path = FSImageSerialization.readString(in);
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.timestamp = FSImageSerialization.readLong(in);
 } else {
 this.timestamp = readLong(in);
@@ -1253,22 +1266,26 @@ public abstract class FSEditLogOp {

 @Override
 void readFields(DataInputStream in, int logVersion) throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 }
 if (-17 < logVersion && length != 2 ||
 logVersion <= -17 && length != 3
-&& !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+&& !NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 throw new IOException("Incorrect data format. Mkdir operation.");
 }
-if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
 this.inodeId = FSImageSerialization.readLong(in);
 } else {
 // This id should be updated when this editLogOp is applied
 this.inodeId = INodeId.GRANDFATHER_INODE_ID;
 }
 this.path = FSImageSerialization.readString(in);
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.timestamp = FSImageSerialization.readLong(in);
 } else {
 this.timestamp = readLong(in);
@@ -1277,8 +1294,10 @@ public abstract class FSEditLogOp {
 // The disk format stores atimes for directories as well.
 // However, currently this is not being updated/used because of
 // performance reasons.
-if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 FSImageSerialization.readLong(in);
 } else {
 readLong(in);
@@ -1861,7 +1880,8 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 if (length != 3) {
 throw new IOException("Incorrect data format. " + "times operation.");
@@ -1869,7 +1889,8 @@ public abstract class FSEditLogOp {
 }
 this.path = FSImageSerialization.readString(in);

-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.mtime = FSImageSerialization.readLong(in);
 this.atime = FSImageSerialization.readLong(in);
 } else {
@@ -1978,14 +1999,16 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 if (this.length != 4) {
 throw new IOException("Incorrect data format. "
 + "symlink operation.");
 }
 }
-if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
 this.inodeId = FSImageSerialization.readLong(in);
 } else {
 // This id should be updated when the editLogOp is applied
@@ -1994,7 +2017,8 @@ public abstract class FSEditLogOp {
 this.path = FSImageSerialization.readString(in);
 this.value = FSImageSerialization.readString(in);

-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.mtime = FSImageSerialization.readLong(in);
 this.atime = FSImageSerialization.readLong(in);
 } else {
@@ -2112,7 +2136,8 @@ public abstract class FSEditLogOp {
 @Override
 void readFields(DataInputStream in, int logVersion)
 throws IOException {
-if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.length = in.readInt();
 if (this.length != 3) {
 throw new IOException("Incorrect data format. " + "Rename operation.");
@@ -2121,7 +2146,8 @@ public abstract class FSEditLogOp {
 this.src = FSImageSerialization.readString(in);
 this.dst = FSImageSerialization.readString(in);

-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.timestamp = FSImageSerialization.readLong(in);
 } else {
 this.timestamp = readLong(in);
@@ -2332,7 +2358,8 @@ public abstract class FSEditLogOp {
 throws IOException {
 this.token = new DelegationTokenIdentifier();
 this.token.readFields(in);
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.expiryTime = FSImageSerialization.readLong(in);
 } else {
 this.expiryTime = readLong(in);
@@ -2404,7 +2431,8 @@ public abstract class FSEditLogOp {
 throws IOException {
 this.token = new DelegationTokenIdentifier();
 this.token.readFields(in);
-if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
 this.expiryTime = FSImageSerialization.readLong(in);
 } else {
 this.expiryTime = readLong(in);
@@ -3488,7 +3516,8 @@ public abstract class FSEditLogOp {
 */
 public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
 this.logVersion = logVersion;
-if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
 this.checksum = new PureJavaCrc32();
 } else {
 this.checksum = null;
@@ -3627,7 +3656,8 @@ public abstract class FSEditLogOp {
 throw new IOException("Read invalid opcode " + opCode);
 }

-if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
 // Read the txid
 op.setTransactionId(in.readLong());
 } else {
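Every readFields hunk above repeats one rewrite: the static check moves to NameNodeLayoutVersion while the feature constants stay in LayoutVersion.Feature. A self-contained sketch of how such a per-node-type supports() registry can work; the class, map, and version numbers below are illustrative, not the actual Hadoop implementation:

    import java.util.HashMap;
    import java.util.Map;

    class SupportsSketch {
      // feature -> layout version that introduced it (example values;
      // HDFS layout versions are negative and decrease over time)
      static final Map<String, Integer> FEATURES = new HashMap<>();
      static {
        FEATURES.put("EDITLOG_OP_OPTIMIZATION", -39);
        FEATURES.put("ADD_INODE_ID", -42);
      }

      static boolean supports(String feature, int layoutVersion) {
        Integer introducedAt = FEATURES.get(feature);
        // a more negative layoutVersion is newer, so it supports every
        // feature introduced at an equal or less negative version
        return introducedAt != null && layoutVersion <= introducedAt;
      }

      public static void main(String[] args) {
        System.out.println(supports("ADD_INODE_ID", -47));  // true
        System.out.println(supports("ADD_INODE_ID", -38));  // false
      }
    }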
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -216,11 +215,11 @@ public class FSImage implements Closeable {
 }
 if (startOpt != StartupOption.UPGRADE
 && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
-&& layoutVersion != HdfsConstants.LAYOUT_VERSION) {
+&& layoutVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) {
 throw new IOException(
 "\nFile system image contains an old layout version "
 + storage.getLayoutVersion() + ".\nAn upgrade to version "
-+ HdfsConstants.LAYOUT_VERSION + " is required.\n"
++ HdfsConstants.NAMENODE_LAYOUT_VERSION + " is required.\n"
 + "Please restart NameNode with -upgrade option.");
 }

@@ -333,7 +332,7 @@ public class FSImage implements Closeable {
 long oldCTime = storage.getCTime();
 storage.cTime = now(); // generate new cTime for the state
 int oldLV = storage.getLayoutVersion();
-storage.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+storage.layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION;

 List<StorageDirectory> errorSDs =
 Collections.synchronizedList(new ArrayList<StorageDirectory>());
@@ -393,11 +392,11 @@ public class FSImage implements Closeable {
 boolean canRollback = false;
 FSImage prevState = new FSImage(conf);
 try {
-prevState.getStorage().layoutVersion = HdfsConstants.LAYOUT_VERSION;
+prevState.getStorage().layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION;
 for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
 StorageDirectory sd = it.next();
 if (!NNUpgradeUtil.canRollBack(sd, storage, prevState.getStorage(),
-HdfsConstants.LAYOUT_VERSION)) {
+HdfsConstants.NAMENODE_LAYOUT_VERSION)) {
 continue;
 }
 canRollback = true;
@@ -407,7 +406,7 @@ public class FSImage implements Closeable {
 // If HA is enabled, check if the shared log can be rolled back as well.
 editLog.initJournalsForWrite();
 canRollback |= editLog.canRollBackSharedLog(prevState.getStorage(),
-HdfsConstants.LAYOUT_VERSION);
+HdfsConstants.NAMENODE_LAYOUT_VERSION);
 }

 if (!canRollback)
@@ -561,8 +560,8 @@ public class FSImage implements Closeable {

 initEditLog(startOpt);

-if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT,
-getLayoutVersion())) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
 // If we're open for write, we're either non-HA or we're the active NN, so
 // we better be able to load all the edits. If we're the standby NN, it's
 // OK to not be able to read all of edits right now.
@@ -618,13 +617,13 @@ public class FSImage implements Closeable {
 StorageDirectory sdForProperties = imageFile.sd;
 storage.readProperties(sdForProperties);

-if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT,
-getLayoutVersion())) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
 // For txid-based layout, we should have a .md5 file
 // next to the image file
 loadFSImage(imageFile.getFile(), target, recovery);
-} else if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM,
-getLayoutVersion())) {
+} else if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
 // In 0.22, we have the checksum stored in the VERSION file.
 String md5 = storage.getDeprecatedProperty(
 NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
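Rollback in FSImage now probes each storage directory and the shared edit log against the namenode-specific constant. A condensed sketch of the guard these call sites feed into; the variable names are illustrative, and the real checks live inside NNUpgradeUtil.canRollBack and the shared-log equivalent:

    // a previous state is rollback-eligible only if this software can
    // still read it, i.e. it is not newer than the current NN layout
    boolean eligible =
        prevLayoutVersion >= HdfsConstants.NAMENODE_LAYOUT_VERSION  // not newer
        && prevCTime <= namespaceCTime;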
@@ -260,9 +260,10 @@ public class FSImageFormat {
 "imgVersion " + imgVersion +
 " expected to be " + getLayoutVersion());
 }
-boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
-imgVersion);
-if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imgVersion)) {
+boolean supportSnapshot = NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.SNAPSHOT, imgVersion);
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_LAYOUT_FLAGS, imgVersion)) {
 LayoutFlags.read(in);
 }

@@ -275,7 +276,8 @@ public class FSImageFormat {
 long genstamp = in.readLong();
 namesystem.setGenerationStampV1(genstamp);

-if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
 // read the starting generation stamp for sequential block IDs
 genstamp = in.readLong();
 namesystem.setGenerationStampV2(genstamp);
@@ -297,14 +299,16 @@ public class FSImageFormat {

 // read the transaction ID of the last edit represented by
 // this image
-if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.STORED_TXIDS, imgVersion)) {
 imgTxId = in.readLong();
 } else {
 imgTxId = 0;
 }

 // read the last allocated inode id in the fsimage
-if (LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, imgVersion)) {
 long lastInodeId = in.readLong();
 namesystem.resetLastInodeId(lastInodeId);
 if (LOG.isDebugEnabled()) {
@@ -323,7 +327,8 @@ public class FSImageFormat {

 // read compression related info
 FSImageCompression compression;
-if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FSIMAGE_COMPRESSION, imgVersion)) {
 compression = FSImageCompression.readCompressionHeader(conf, in);
 } else {
 compression = FSImageCompression.createNoopCompression();
@@ -336,8 +341,8 @@ public class FSImageFormat {
 LOG.info("Number of files = " + numFiles);
 prog.setTotal(Phase.LOADING_FSIMAGE, step, numFiles);
 Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
-if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
-imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, imgVersion)) {
 if (supportSnapshot) {
 loadLocalNameINodesWithSnapshot(numFiles, in, counter);
 } else {
@@ -394,9 +399,10 @@ public class FSImageFormat {
 */
 private void loadLocalNameINodesWithSnapshot(long numFiles, DataInput in,
 Counter counter) throws IOException {
-assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
-getLayoutVersion());
-assert LayoutVersion.supports(Feature.SNAPSHOT, getLayoutVersion());
+assert NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, getLayoutVersion());
+assert NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.SNAPSHOT, getLayoutVersion());

 // load root
 loadRoot(in, counter);
@@ -414,8 +420,8 @@ public class FSImageFormat {
 */
 private void loadLocalNameINodes(long numFiles, DataInput in, Counter counter)
 throws IOException {
-assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
-getLayoutVersion());
+assert NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, getLayoutVersion());
 assert numFiles > 0;

 // load root
@@ -624,7 +630,8 @@ public class FSImageFormat {
 final byte[] localName = FSImageSerialization.readLocalName(in);
 INode inode = loadINode(localName, isSnapshotINode, in, counter);
 if (updateINodeMap
-&& LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
+&& NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
 namesystem.dir.addToInodeMap(inode);
 }
 return inode;
@@ -640,18 +647,21 @@ public class FSImageFormat {
 INode loadINode(final byte[] localName, boolean isSnapshotINode,
 DataInput in, Counter counter) throws IOException {
 final int imgVersion = getLayoutVersion();
-if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.SNAPSHOT, imgVersion)) {
 namesystem.getFSDirectory().verifyINodeName(localName);
 }

-long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ?
-in.readLong() : namesystem.allocateNewInodeId();
+long inodeId = NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong()
+: namesystem.allocateNewInodeId();

 final short replication = namesystem.getBlockManager().adjustReplication(
 in.readShort());
 final long modificationTime = in.readLong();
 long atime = 0;
-if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.FILE_ACCESS_TIME, imgVersion)) {
 atime = in.readLong();
 }
 final long blockSize = in.readLong();
@@ -671,7 +681,8 @@ public class FSImageFormat {
 String clientMachine = "";
 boolean underConstruction = false;
 FileDiffList fileDiffs = null;
-if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.SNAPSHOT, imgVersion)) {
 // read diffs
 fileDiffs = SnapshotFSImageFormat.loadFileDiffList(in, this);

@@ -710,14 +721,16 @@ public class FSImageFormat {
 //read quotas
 final long nsQuota = in.readLong();
 long dsQuota = -1L;
-if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.DISKSPACE_QUOTA, imgVersion)) {
 dsQuota = in.readLong();
 }

 //read snapshot info
 boolean snapshottable = false;
 boolean withSnapshot = false;
-if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+if (NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.SNAPSHOT, imgVersion)) {
 snapshottable = in.readBoolean();
 if (!snapshottable) {
 withSnapshot = in.readBoolean();
@@ -780,7 +793,8 @@ public class FSImageFormat {
 throws IOException {
 final int layoutVersion = getLayoutVersion();

-if (!LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
 return loadINodeWithLocalName(true, in, false).asFile();
 }

@@ -801,7 +815,8 @@ public class FSImageFormat {
 throws IOException {
 final int layoutVersion = getLayoutVersion();

-if (!LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
 return loadINodeWithLocalName(true, in, false).asDirectory();
 }

@@ -836,7 +851,8 @@ public class FSImageFormat {
 INodeFile oldnode = null;
 boolean inSnapshot = false;
 if (path != null && FSDirectory.isReservedName(path) &&
-LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
+NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
 // TODO: for HDFS-5428, we use reserved path for those INodeFileUC in
 // snapshot. If we support INode ID in the layout version, we can use
 // the inode id to find the oldnode.
@@ -869,7 +885,8 @@ public class FSImageFormat {
 throws IOException {
 int imgVersion = getLayoutVersion();

-if (!LayoutVersion.supports(Feature.DELEGATION_TOKEN, imgVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.DELEGATION_TOKEN, imgVersion)) {
 //SecretManagerState is not available.
 //This must not happen if security is turned on.
 return;
@@ -879,7 +896,8 @@ public class FSImageFormat {

 private void loadCacheManagerState(DataInput in) throws IOException {
 int imgVersion = getLayoutVersion();
-if (!LayoutVersion.supports(Feature.CACHING, imgVersion)) {
+if (!NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.CACHING, imgVersion)) {
 return;
 }
 namesystem.getCacheManager().loadStateCompat(in);
@@ -993,7 +1011,7 @@ public class FSImageFormat {
 DigestOutputStream fos = new DigestOutputStream(fout, digester);
 DataOutputStream out = new DataOutputStream(fos);
 try {
-out.writeInt(HdfsConstants.LAYOUT_VERSION);
+out.writeInt(HdfsConstants.NAMENODE_LAYOUT_VERSION);
 LayoutFlags.write(out);
 // We use the non-locked version of getNamespaceInfo here since
 // the coordinating thread of saveNamespace already has read-locked
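The image loader above reads a version-dependent sequence of header fields, each gated on imgVersion. In outline, following the order of the hunks above (local variables here are illustrative):

    // sketch of the gated header reads during fsimage loading
    long genstampV1 = in.readLong();
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
      long genstampV2 = in.readLong();   // only present in newer images
    }
    long imgTxId = NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.STORED_TXIDS, imgVersion) ? in.readLong() : 0;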
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -118,8 +117,9 @@ public class FSImageSerialization {
 DataInput in, FSNamesystem fsNamesys, int imgVersion)
 throws IOException {
 byte[] name = readBytes(in);
-long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ? in
-.readLong() : fsNamesys.allocateNewInodeId();
+long inodeId = NameNodeLayoutVersion.supports(
+LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong()
+: fsNamesys.allocateNewInodeId();
 short blockReplication = in.readShort();
 long modificationTime = in.readLong();
 long preferredBlockSize = in.readLong();
@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FileUtil;
|
import org.apache.hadoop.fs.FileUtil;
|
||||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||||
import org.apache.hadoop.hdfs.server.common.Storage;
|
import org.apache.hadoop.hdfs.server.common.Storage;
|
||||||
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||||
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
|
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
|
||||||
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
|
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
|
||||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||||
|
@ -531,7 +532,7 @@ public class FileJournalManager implements JournalManager {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long getJournalCTime() throws IOException {
|
public long getJournalCTime() throws IOException {
|
||||||
StorageInfo sInfo = new StorageInfo();
|
StorageInfo sInfo = new StorageInfo(NodeType.NAME_NODE);
|
||||||
sInfo.readProperties(sd);
|
sInfo.readProperties(sd);
|
||||||
return sInfo.getCTime();
|
return sInfo.getCTime();
|
||||||
}
|
}
|
||||||
|
|
|
@ -38,7 +38,6 @@ import org.apache.hadoop.fs.FileUtil;
|
||||||
import org.apache.hadoop.hdfs.DFSUtil;
|
import org.apache.hadoop.hdfs.DFSUtil;
|
||||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
|
|
||||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||||
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
|
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
|
||||||
|
@ -533,7 +532,7 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
*/
|
*/
|
||||||
public void format(NamespaceInfo nsInfo) throws IOException {
|
public void format(NamespaceInfo nsInfo) throws IOException {
|
||||||
Preconditions.checkArgument(nsInfo.getLayoutVersion() == 0 ||
|
Preconditions.checkArgument(nsInfo.getLayoutVersion() == 0 ||
|
||||||
nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION,
|
nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION,
|
||||||
"Bad layout version: %s", nsInfo.getLayoutVersion());
|
"Bad layout version: %s", nsInfo.getLayoutVersion());
|
||||||
|
|
||||||
this.setStorageInfo(nsInfo);
|
this.setStorageInfo(nsInfo);
|
||||||
|
@ -552,7 +551,7 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
}
|
}
|
||||||
|
|
||||||
public void format() throws IOException {
|
public void format() throws IOException {
|
||||||
this.layoutVersion = HdfsConstants.LAYOUT_VERSION;
|
this.layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION;
|
||||||
for (Iterator<StorageDirectory> it =
|
for (Iterator<StorageDirectory> it =
|
||||||
dirIterator(); it.hasNext();) {
|
dirIterator(); it.hasNext();) {
|
||||||
StorageDirectory sd = it.next();
|
StorageDirectory sd = it.next();
|
||||||
|
@ -589,7 +588,8 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set Block pool ID in version with federation support
|
// Set Block pool ID in version with federation support
|
||||||
if (versionSupportsFederation()) {
|
if (NameNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
|
||||||
String sbpid = props.getProperty("blockpoolID");
|
String sbpid = props.getProperty("blockpoolID");
|
||||||
setBlockPoolID(sd.getRoot(), sbpid);
|
setBlockPoolID(sd.getRoot(), sbpid);
|
||||||
}
|
}
|
||||||
|
@ -614,7 +614,7 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
* This should only be used during upgrades.
|
* This should only be used during upgrades.
|
||||||
*/
|
*/
|
||||||
String getDeprecatedProperty(String prop) {
|
String getDeprecatedProperty(String prop) {
|
||||||
assert getLayoutVersion() > HdfsConstants.LAYOUT_VERSION :
|
assert getLayoutVersion() > HdfsConstants.NAMENODE_LAYOUT_VERSION :
|
||||||
"getDeprecatedProperty should only be done when loading " +
|
"getDeprecatedProperty should only be done when loading " +
|
||||||
"storage from past versions during upgrade.";
|
"storage from past versions during upgrade.";
|
||||||
return deprecatedProperties.get(prop);
|
return deprecatedProperties.get(prop);
|
||||||
|
@ -636,7 +636,8 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
) throws IOException {
|
) throws IOException {
|
||||||
super.setPropertiesFromFields(props, sd);
|
super.setPropertiesFromFields(props, sd);
|
||||||
// Set blockpoolID in version with federation support
|
// Set blockpoolID in version with federation support
|
||||||
if (versionSupportsFederation()) {
|
if (NameNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
|
||||||
props.setProperty("blockpoolID", blockpoolID);
|
props.setProperty("blockpoolID", blockpoolID);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -808,7 +809,8 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
// If upgrade from a release that does not support federation,
|
// If upgrade from a release that does not support federation,
|
||||||
// if clusterId is provided in the startupOptions use it.
|
// if clusterId is provided in the startupOptions use it.
|
||||||
// Else generate a new cluster ID
|
// Else generate a new cluster ID
|
||||||
if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
|
if (!NameNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
|
||||||
if (startOpt.getClusterId() == null) {
|
if (startOpt.getClusterId() == null) {
|
||||||
startOpt.setClusterId(newClusterID());
|
startOpt.setClusterId(newClusterID());
|
||||||
}
|
}
|
||||||
|
@ -1005,7 +1007,8 @@ public class NNStorage extends Storage implements Closeable,
|
||||||
// (ie edits_<txnid>) then use the new inspector, which will ignore
|
// (ie edits_<txnid>) then use the new inspector, which will ignore
|
||||||
// the old format dirs.
|
// the old format dirs.
|
||||||
FSImageStorageInspector inspector;
|
FSImageStorageInspector inspector;
|
||||||
if (LayoutVersion.supports(Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
|
if (NameNodeLayoutVersion.supports(
|
||||||
|
LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
|
||||||
inspector = new FSImageTransactionalStorageInspector();
|
inspector = new FSImageTransactionalStorageInspector();
|
||||||
} else {
|
} else {
|
||||||
inspector = new FSImagePreTransactionalStorageInspector();
|
inspector = new FSImagePreTransactionalStorageInspector();
|
||||||
|
|
|
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.SortedSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
+
+
+@InterfaceAudience.Private
+public class NameNodeLayoutVersion {
+  /** Build layout version and corresponding feature matrix */
+  public final static Map<Integer, SortedSet<LayoutFeature>> FEATURES
+      = new HashMap<Integer, SortedSet<LayoutFeature>>();
+
+  public static final int CURRENT_LAYOUT_VERSION
+      = LayoutVersion.getCurrentLayoutVersion(FEATURES, Feature.values());
+
+  static {
+    LayoutVersion.updateMap(FEATURES, LayoutVersion.Feature.values());
+    LayoutVersion.updateMap(FEATURES, NameNodeLayoutVersion.Feature.values());
+  }
+
+  public static SortedSet<LayoutFeature> getFeatures(int lv) {
+    return FEATURES.get(lv);
+  }
+
+  public static boolean supports(final LayoutFeature f, final int lv) {
+    return LayoutVersion.supports(FEATURES, f, lv);
+  }
+
+  /**
+   * Enums for features that change the layout version.
+   * <br><br>
+   * To add a new layout version:
+   * <ul>
+   * <li>Define a new enum constant with a short enum name, the new layout version
+   * and description of the added feature.</li>
+   * <li>When adding a layout version with an ancestor that is not same as
+   * its immediate predecessor, use the constructor where a specific ancestor
+   * can be passed.
+   * </li>
+   * </ul>
+   */
+  public static enum Feature implements LayoutFeature {
+    ROLLING_UPGRADE_MARKER(-52, "Upgrade marker for rolling upgrade");
+
+    private final FeatureInfo info;
+
+    /**
+     * Feature that is added at layout version {@code lv} - 1.
+     * @param lv new layout version with the addition of this feature
+     * @param description description of the feature
+     */
+    Feature(final int lv, final String description) {
+      this(lv, lv + 1, description, false);
+    }
+
+    /**
+     * NameNode feature that is added at layout version {@code ancestorLV}.
+     * @param lv new layout version with the addition of this feature
+     * @param ancestorLV layout version from which the new lv is derived.
+     * @param description description of the feature
+     * @param reserved true when this is a layout version reserved for previous
+     *          versions
+     * @param features set of features that are to be enabled for this version
+     */
+    Feature(final int lv, final int ancestorLV, final String description,
+        boolean reserved, Feature... features) {
+      info = new FeatureInfo(lv, ancestorLV, description, reserved, features);
+    }
+
+    @Override
+    public FeatureInfo getInfo() {
+      return info;
+    }
+  }
+}
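The new NameNodeLayoutVersion class above introduces a feature-matrix pattern: each layout version maps to the full sorted set of features supported at that version, so supports(f, lv) becomes a map lookup instead of an enum scan. A minimal self-contained sketch of that idea follows; the feature names and introduction versions are hypothetical except ROLLING_UPGRADE_MARKER(-52), which the new file defines, and this is not the real Hadoop class:

import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

public class FeatureMatrixSketch {
  // Hypothetical features; HDFS layout versions are negative and
  // decrease (grow more negative) as new features are added.
  enum Feature {
    TXID_BASED_LAYOUT(-40),
    CACHING(-51),
    ROLLING_UPGRADE_MARKER(-52);

    final int lv;  // layout version that introduced this feature
    Feature(int lv) { this.lv = lv; }
  }

  // Layout version -> every feature supported at that version.
  static final Map<Integer, SortedSet<Feature>> FEATURES =
      new HashMap<Integer, SortedSet<Feature>>();

  static {
    // Accumulate features oldest-first so each version inherits
    // everything its ancestor supported.
    SortedSet<Feature> accumulated = new TreeSet<Feature>();
    for (Feature f : Feature.values()) {
      accumulated.add(f);
      FEATURES.put(f.lv, new TreeSet<Feature>(accumulated));
    }
  }

  static boolean supports(Feature f, int lv) {
    SortedSet<Feature> set = FEATURES.get(lv);
    return set != null && set.contains(f);
  }

  public static void main(String[] args) {
    // A newer version inherits everything its ancestor supported ...
    System.out.println(supports(Feature.CACHING, -52));                // true
    // ... but an older version knows nothing about later features.
    System.out.println(supports(Feature.ROLLING_UPGRADE_MARKER, -51)); // false
  }
}

Splitting the matrix per node type is what lets NameNodeLayoutVersion and DataNodeLayoutVersion evolve independently while both still seed their maps from the shared LayoutVersion.Feature history.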
@@ -973,7 +973,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
  @Override // DatanodeProtocol
  public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
      throws IOException {
-    verifyLayoutVersion(nodeReg.getVersion());
    verifySoftwareVersion(nodeReg);
    namesystem.registerDatanode(nodeReg);
    return nodeReg;
@@ -1071,7 +1070,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
   * @throws UnregisteredNodeException if the registration is invalid
   */
  void verifyRequest(NodeRegistration nodeReg) throws IOException {
-    verifyLayoutVersion(nodeReg.getVersion());
    if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
      LOG.warn("Invalid registrationID - expected: "
          + namesystem.getRegistrationID() + " received: "
@@ -1147,8 +1145,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
   * @throws IOException
   */
  void verifyLayoutVersion(int version) throws IOException {
-    if (version != HdfsConstants.LAYOUT_VERSION)
-      throw new IncorrectVersionException(version, "data node");
+    if (version != HdfsConstants.NAMENODE_LAYOUT_VERSION)
+      throw new IncorrectVersionException(
+          HdfsConstants.NAMENODE_LAYOUT_VERSION, version, "data node");
  }

  private void verifySoftwareVersion(DatanodeRegistration dnReg)

@@ -511,8 +511,10 @@ public class SecondaryNameNode implements Runnable {
    boolean loadImage = false;
    boolean isFreshCheckpointer = (checkpointImage.getNamespaceID() == 0);
    boolean isSameCluster =
-        (dstStorage.versionSupportsFederation() && sig.isSameCluster(checkpointImage)) ||
-        (!dstStorage.versionSupportsFederation() && sig.namespaceIdMatches(checkpointImage));
+        (dstStorage.versionSupportsFederation(NameNodeLayoutVersion.FEATURES)
+            && sig.isSameCluster(checkpointImage)) ||
+        (!dstStorage.versionSupportsFederation(NameNodeLayoutVersion.FEATURES)
+            && sig.namespaceIdMatches(checkpointImage));
    if (isFreshCheckpointer ||
        (isSameCluster &&
         !sig.storageVersionMatches(checkpointImage.getStorage()))) {

@@ -153,9 +153,9 @@ public class BootstrapStandby implements Tool, Configurable {
    }

    if (!checkLayoutVersion(nsInfo)) {
-      LOG.fatal("Layout version on remote node (" +
-          nsInfo.getLayoutVersion() + ") does not match " +
-          "this node's layout version (" + HdfsConstants.LAYOUT_VERSION + ")");
+      LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
+          + ") does not match " + "this node's layout version ("
+          + HdfsConstants.NAMENODE_LAYOUT_VERSION + ")");
      return ERR_CODE_INVALID_VERSION;
    }

@@ -257,7 +257,7 @@ public class BootstrapStandby implements Tool, Configurable {
  }

  private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    return (nsInfo.getLayoutVersion() == HdfsConstants.LAYOUT_VERSION);
+    return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION);
  }

  private void parseConfAndFindOtherNN() throws IOException {
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.util.VersionInfo;

@@ -41,13 +42,14 @@ public class NamespaceInfo extends StorageInfo {
  String softwareVersion;

  public NamespaceInfo() {
-    super();
+    super(NodeType.NAME_NODE);
    buildVersion = null;
  }

  public NamespaceInfo(int nsID, String clusterID, String bpID,
      long cT, String buildVersion, String softwareVersion) {
-    super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
+    super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
+        NodeType.NAME_NODE);
    blockPoolID = bpID;
    this.buildVersion = buildVersion;
    this.softwareVersion = softwareVersion;
@@ -30,10 +30,10 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
@@ -158,7 +158,8 @@ class ImageLoaderCurrent implements ImageLoader {
      imageVersion = in.readInt();
      if( !canLoadVersion(imageVersion))
        throw new IOException("Cannot process fslayout version " + imageVersion);
-      if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
        LayoutFlags.read(in);
      }

@@ -169,22 +170,25 @@ class ImageLoaderCurrent implements ImageLoader {

      v.visit(ImageElement.GENERATION_STAMP, in.readLong());

-      if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
        v.visit(ImageElement.GENERATION_STAMP_V2, in.readLong());
        v.visit(ImageElement.GENERATION_STAMP_V1_LIMIT, in.readLong());
        v.visit(ImageElement.LAST_ALLOCATED_BLOCK_ID, in.readLong());
      }

-      if (LayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.STORED_TXIDS, imageVersion)) {
        v.visit(ImageElement.TRANSACTION_ID, in.readLong());
      }

-      if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_INODE_ID, imageVersion)) {
        v.visit(ImageElement.LAST_INODE_ID, in.readLong());
      }

-      boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
-          imageVersion);
+      boolean supportSnapshot = NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.SNAPSHOT, imageVersion);
      if (supportSnapshot) {
        v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt());
        int numSnapshots = in.readInt();
@@ -194,7 +198,8 @@ class ImageLoaderCurrent implements ImageLoader {
        }
      }

-      if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.FSIMAGE_COMPRESSION, imageVersion)) {
        boolean isCompressed = in.readBoolean();
        v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
        if (isCompressed) {
@@ -216,11 +221,13 @@ class ImageLoaderCurrent implements ImageLoader {

      processINodesUC(in, v, skipBlocks);

-      if (LayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.DELEGATION_TOKEN, imageVersion)) {
        processDelegationTokens(in, v);
      }

-      if (LayoutVersion.supports(Feature.CACHING, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.CACHING, imageVersion)) {
        processCacheManagerState(in, v);
      }
      v.leaveEnclosingElement(); // FSImage
@@ -323,7 +330,8 @@ class ImageLoaderCurrent implements ImageLoader {
      String n = new String(name, "UTF8");
      v.visit(ImageElement.INODE_PATH, n);

-      if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_INODE_ID, imageVersion)) {
        long inodeId = in.readLong();
        v.visit(ImageElement.INODE_ID, inodeId);
      }
@@ -443,7 +451,8 @@ class ImageLoaderCurrent implements ImageLoader {
      v.visitEnclosingElement(ImageElement.INODES,
          ImageElement.NUM_INODES, numInodes);

-      if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
        if (!supportSnapshot) {
          processLocalNameINodes(in, v, numInodes, skipBlocks);
        } else {
@@ -584,7 +593,8 @@ class ImageLoaderCurrent implements ImageLoader {
      if (!useRoot) {
        if (in.readBoolean()) {
          v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES);
-          if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
+          if (NameNodeLayoutVersion.supports(
+              LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
            processINodeDirectoryAttributes(in, v, currentINodeName);
          } else {
            processINode(in, v, true, currentINodeName, true);
@@ -678,10 +688,10 @@ class ImageLoaderCurrent implements ImageLoader {
    private void processINode(DataInputStream in, ImageVisitor v,
        boolean skipBlocks, String parentName, boolean isSnapshotCopy)
        throws IOException {
-      boolean supportSnapshot =
-          LayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
-      boolean supportInodeId =
-          LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
+      boolean supportSnapshot = NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.SNAPSHOT, imageVersion);
+      boolean supportInodeId = NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.ADD_INODE_ID, imageVersion);

      v.visitEnclosingElement(ImageElement.INODE);
      final String pathName = readINodePath(in, parentName);
@@ -694,7 +704,8 @@ class ImageLoaderCurrent implements ImageLoader {
      }
      v.visit(ImageElement.REPLICATION, in.readShort());
      v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
-      if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
+      if(NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.FILE_ACCESS_TIME, imageVersion))
        v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
      v.visit(ImageElement.BLOCK_SIZE, in.readLong());
      int numBlocks = in.readInt();
@@ -723,7 +734,8 @@ class ImageLoaderCurrent implements ImageLoader {
        dirNodeMap.put(inodeId, pathName);
      }
      v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
-      if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
+      if (NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.DISKSPACE_QUOTA, imageVersion))
        v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
      if (supportSnapshot) {
        boolean snapshottable = in.readBoolean();
@@ -770,7 +782,8 @@ class ImageLoaderCurrent implements ImageLoader {
      v.visit(ImageElement.INODE_PATH, pathName);
      processPermission(in, v);
      v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
-      if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) {
+      if(NameNodeLayoutVersion.supports(
+          LayoutVersion.Feature.FILE_ACCESS_TIME, imageVersion)) {
        v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
      }

@@ -799,7 +812,8 @@ class ImageLoaderCurrent implements ImageLoader {
      v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
      if (in.readBoolean()) {
        v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
-        if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
+        if (NameNodeLayoutVersion.supports(
+            LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
          processINodeFileAttributes(in, v, currentINodeName);
        } else {
          processINode(in, v, true, currentINodeName, true);
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -884,8 +885,8 @@ public class DFSTestUtil {
  }

  public static DatanodeRegistration getLocalDatanodeRegistration() {
-    return new DatanodeRegistration(getLocalDatanodeID(),
-        new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
+    return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(
+        NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
  }

  /** Copy one file's contents into the other **/
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -190,10 +191,11 @@ public class TestDFSRollback {
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    // Put newer layout version in current.
    storageInfo = new StorageInfo(
-        UpgradeUtilities.getCurrentLayoutVersion()-1,
+        HdfsConstants.DATANODE_LAYOUT_VERSION - 1,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
-        UpgradeUtilities.getCurrentFsscTime(cluster));
+        UpgradeUtilities.getCurrentFsscTime(cluster),
+        NodeType.DATA_NODE);

    // Overwrite VERSION file in the current directory of
    // volume directories and block pool slice directories
@@ -250,7 +252,8 @@ public class TestDFSRollback {
    storageInfo = new StorageInfo(Integer.MIN_VALUE,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
-        UpgradeUtilities.getCurrentFsscTime(cluster));
+        UpgradeUtilities.getCurrentFsscTime(cluster),
+        NodeType.DATA_NODE);

    UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -274,9 +277,10 @@ public class TestDFSRollback {

    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
    baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
-    storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+    storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
-        UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
+        UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
+        NodeType.DATA_NODE);

    UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -320,7 +324,7 @@ public class TestDFSRollback {
    storageInfo = new StorageInfo(1,
        UpgradeUtilities.getCurrentNamespaceID(null),
        UpgradeUtilities.getCurrentClusterID(null),
-        UpgradeUtilities.getCurrentFsscTime(null));
+        UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);

    UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
        storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -75,7 +75,7 @@ public class TestDFSStartupVersions {
    StorageData(int layoutVersion, int namespaceId, String clusterId,
        long cTime, String bpid) {
      storageInfo = new StorageInfo(layoutVersion, namespaceId, clusterId,
-          cTime);
+          cTime, NodeType.DATA_NODE);
      blockPoolId = bpid;
    }
  }
@@ -89,7 +89,7 @@ public class TestDFSStartupVersions {
   */
  private StorageData[] initializeVersions() throws Exception {
    int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
-    int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
+    int layoutVersionCur = HdfsConstants.DATANODE_LAYOUT_VERSION;
    int layoutVersionNew = Integer.MIN_VALUE;
    int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
    int namespaceIdOld = Integer.MIN_VALUE;
@@ -200,7 +200,7 @@ public class TestDFSStartupVersions {
      return false;
    }
    // check #3
-    int softwareLV = HdfsConstants.LAYOUT_VERSION;  // will also be Namenode's LV
+    int softwareLV = HdfsConstants.DATANODE_LAYOUT_VERSION;
    int storedLV = datanodeVer.getLayoutVersion();
    if (softwareLV == storedLV &&
        datanodeVer.getCTime() == namenodeVer.getCTime())
@@ -252,7 +252,7 @@ public class TestDFSStartupVersions {
        .startupOption(StartupOption.REGULAR)
        .build();
    StorageData nameNodeVersion = new StorageData(
-        UpgradeUtilities.getCurrentLayoutVersion(),
+        HdfsConstants.NAMENODE_LAYOUT_VERSION,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
        UpgradeUtilities.getCurrentFsscTime(cluster),
@@ -34,6 +34,8 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -265,7 +267,7 @@ public class TestDFSUpgrade {
      storageInfo = new StorageInfo(Integer.MIN_VALUE,
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster),
-          UpgradeUtilities.getCurrentFsscTime(cluster));
+          UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);

      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -280,9 +282,10 @@ public class TestDFSUpgrade {
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
-      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+      storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,
          UpgradeUtilities.getCurrentNamespaceID(cluster),
-          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
+          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
+          NodeType.DATA_NODE);

      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -321,7 +324,7 @@ public class TestDFSUpgrade {
      storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
-          UpgradeUtilities.getCurrentFsscTime(null));
+          UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);

      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -334,7 +337,7 @@ public class TestDFSUpgrade {
      storageInfo = new StorageInfo(Integer.MIN_VALUE,
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
-          UpgradeUtilities.getCurrentFsscTime(null));
+          UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);

      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
@@ -179,7 +179,7 @@ public class TestDatanodeRegistration {
          .getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
-      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
          .getLayoutVersion();
      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
@@ -224,7 +224,7 @@ public class TestDatanodeRegistration {
      doReturn(nnCTime).when(mockStorageInfo).getCTime();

      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
+      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn(123).when(mockDnReg).getXferPort();
      doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
@@ -272,7 +272,7 @@ public class TestDatanodeRegistration {
      doReturn(nnCTime).when(mockStorageInfo).getCTime();

      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
+      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

@@ -40,13 +40,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -471,7 +471,8 @@ public class UpgradeUtilities {
  public static void createBlockPoolVersionFile(File bpDir,
      StorageInfo version, String bpid) throws IOException {
    // Create block pool version files
-    if (LayoutVersion.supports(Feature.FEDERATION, version.layoutVersion)) {
+    if (DataNodeLayoutVersion.supports(
+        LayoutVersion.Feature.FEDERATION, version.layoutVersion)) {
      File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
      BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
          bpid);
@@ -514,8 +515,8 @@ public class UpgradeUtilities {
   * Return the layout version inherent in the current version
   * of the Namenode, whether it is running or not.
   */
-  public static int getCurrentLayoutVersion() {
-    return HdfsConstants.LAYOUT_VERSION;
+  public static int getCurrentNameNodeLayoutVersion() {
+    return HdfsConstants.NAMENODE_LAYOUT_VERSION;
  }

  /**
@@ -20,9 +20,13 @@ package org.apache.hadoop.hdfs.protocol;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;

-import java.util.EnumSet;
+import java.util.SortedSet;

 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.junit.Test;

 /**
@@ -36,7 +40,7 @@ public class TestLayoutVersion {
   */
  @Test
  public void testFeaturesFromAncestorSupported() {
-    for (Feature f : Feature.values()) {
+    for (LayoutFeature f : Feature.values()) {
      validateFeatureList(f);
    }
  }
@@ -46,8 +50,8 @@ public class TestLayoutVersion {
   */
  @Test
  public void testRelease203() {
-    assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN,
-        Feature.RESERVED_REL20_203.lv));
+    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.DELEGATION_TOKEN,
+        Feature.RESERVED_REL20_203.getInfo().getLayoutVersion()));
  }

  /**
@@ -55,8 +59,8 @@ public class TestLayoutVersion {
   */
  @Test
  public void testRelease204() {
-    assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN,
-        Feature.RESERVED_REL20_204.lv));
+    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.DELEGATION_TOKEN,
+        Feature.RESERVED_REL20_204.getInfo().getLayoutVersion()));
  }

  /**
@@ -64,23 +68,42 @@ public class TestLayoutVersion {
   */
  @Test
  public void testRelease1_2_0() {
-    assertTrue(LayoutVersion.supports(Feature.CONCAT,
-        Feature.RESERVED_REL1_2_0.lv));
+    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.CONCAT,
+        Feature.RESERVED_REL1_2_0.getInfo().getLayoutVersion()));
+  }
+
+  /**
+   * Test to make sure NameNode.Feature supports previous features
+   */
+  @Test
+  public void testNameNodeFeature() {
+    assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.CACHING,
+        NameNodeLayoutVersion.Feature.ROLLING_UPGRADE_MARKER.getInfo().getLayoutVersion()));
+  }
+
+  /**
+   * Test to make sure DataNode.Feature supports previous features
+   */
+  @Test
+  public void testDataNodeFeature() {
+    assertTrue(DataNodeLayoutVersion.supports(LayoutVersion.Feature.CACHING,
+        DataNodeLayoutVersion.Feature.FIRST_LAYOUT.getInfo().getLayoutVersion()));
  }

  /**
   * Given feature {@code f}, ensures the layout version of that feature
   * supports all the features supported by its ancestor.
   */
-  private void validateFeatureList(Feature f) {
-    int lv = f.lv;
-    int ancestorLV = f.ancestorLV;
-    EnumSet<Feature> ancestorSet = LayoutVersion.map.get(ancestorLV);
+  private void validateFeatureList(LayoutFeature f) {
+    final FeatureInfo info = f.getInfo();
+    int lv = info.getLayoutVersion();
+    int ancestorLV = info.getAncestorLayoutVersion();
+    SortedSet<LayoutFeature> ancestorSet = NameNodeLayoutVersion.getFeatures(ancestorLV);
    assertNotNull(ancestorSet);
-    for (Feature feature : ancestorSet) {
+    for (LayoutFeature feature : ancestorSet) {
      assertTrue("LV " + lv + " does not support " + feature
-          + " supported by the ancestor LV " + f.ancestorLV,
-          LayoutVersion.supports(feature, lv));
+          + " supported by the ancestor LV " + info.getAncestorLayoutVersion(),
+          NameNodeLayoutVersion.supports(feature, lv));
    }
  }
 }
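The updated validateFeatureList above walks the ancestor version's feature set and asserts that every feature is still supported at the descendant version. A rough standalone illustration of that invariant (hypothetical version numbers, and a plain map standing in for the real FEATURES matrix; not the Hadoop test classes themselves):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class AncestorCheckSketch {
  public static void main(String[] args) {
    // Layout version -> supported features (hypothetical numbers).
    Map<Integer, Set<String>> features = new HashMap<Integer, Set<String>>();
    features.put(-51, new HashSet<String>(Arrays.asList("CACHING")));
    features.put(-52, new HashSet<String>(
        Arrays.asList("CACHING", "ROLLING_UPGRADE_MARKER")));

    int lv = -52;          // descendant layout version
    int ancestorLV = -51;  // the version it was derived from

    // Every ancestor feature must survive into the descendant version.
    for (String f : features.get(ancestorLV)) {
      if (!features.get(lv).contains(f)) {
        throw new AssertionError("LV " + lv + " does not support " + f
            + " supported by the ancestor LV " + ancestorLV);
      }
    }
    System.out.println("LV " + lv + " inherits every ancestor feature");
  }
}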
@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockKey;
|
||||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||||
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
||||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
||||||
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.*;
|
import org.apache.hadoop.hdfs.server.protocol.*;
|
||||||
|
@ -100,15 +101,15 @@ public class TestPBHelper {
|
||||||
PBHelper.convert(NamenodeRoleProto.NAMENODE));
|
PBHelper.convert(NamenodeRoleProto.NAMENODE));
|
||||||
}
|
}
|
||||||
|
|
||||||
private static StorageInfo getStorageInfo() {
|
private static StorageInfo getStorageInfo(NodeType type) {
|
||||||
return new StorageInfo(1, 2, "cid", 3);
|
return new StorageInfo(1, 2, "cid", 3, type);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testConvertStoragInfo() {
|
public void testConvertStoragInfo() {
|
||||||
StorageInfo info = getStorageInfo();
|
StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
|
||||||
StorageInfoProto infoProto = PBHelper.convert(info);
|
StorageInfoProto infoProto = PBHelper.convert(info);
|
||||||
StorageInfo info2 = PBHelper.convert(infoProto);
|
StorageInfo info2 = PBHelper.convert(infoProto, NodeType.NAME_NODE);
|
||||||
assertEquals(info.getClusterID(), info2.getClusterID());
|
assertEquals(info.getClusterID(), info2.getClusterID());
|
||||||
assertEquals(info.getCTime(), info2.getCTime());
|
assertEquals(info.getCTime(), info2.getCTime());
|
||||||
assertEquals(info.getLayoutVersion(), info2.getLayoutVersion());
|
assertEquals(info.getLayoutVersion(), info2.getLayoutVersion());
|
||||||
|
@@ -117,7 +118,7 @@ public class TestPBHelper {

   @Test
   public void testConvertNamenodeRegistration() {
-    StorageInfo info = getStorageInfo();
+    StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
     NamenodeRegistration reg = new NamenodeRegistration("address:999",
         "http:1000", info, NamenodeRole.NAMENODE);
     NamenodeRegistrationProto regProto = PBHelper.convert(reg);

@@ -243,8 +244,8 @@ public class TestPBHelper {

   @Test
   public void testConvertCheckpointSignature() {
-    CheckpointSignature s = new CheckpointSignature(getStorageInfo(), "bpid",
-        100, 1);
+    CheckpointSignature s = new CheckpointSignature(
+        getStorageInfo(NodeType.NAME_NODE), "bpid", 100, 1);
     CheckpointSignatureProto sProto = PBHelper.convert(s);
     CheckpointSignature s1 = PBHelper.convert(sProto);
     assertEquals(s.getBlockpoolID(), s1.getBlockpoolID());

@@ -515,7 +516,7 @@ public class TestPBHelper {
     ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
         getBlockKey(1), keys);
     DatanodeRegistration reg = new DatanodeRegistration(dnId,
-        new StorageInfo(), expKeys, "3.0.0");
+        new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
     DatanodeRegistrationProto proto = PBHelper.convert(reg);
     DatanodeRegistration reg2 = PBHelper.convert(proto);
     compare(reg.getStorageInfo(), reg2.getStorageInfo());

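Note: the DatanodeRegistration hunk shows that even the formerly no-arg StorageInfo constructor now requires an owner type. Inferring only from the call sites in this diff, the added constructor presumably looks something like the following (the field defaults are an assumption, not the committed source):

    // Hypothetical reconstruction of the new "empty" StorageInfo constructor.
    public StorageInfo(NodeType type) {
      this(0, 0, "", 0L, type);  // zeroed layoutVersion/namespaceID/cTime, empty clusterID
    }
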
@@ -190,7 +190,7 @@ public class TestJournalNode {
     byte[] retrievedViaHttp = DFSTestUtil.urlGetBytes(new URL(urlRoot +
         "/getJournal?segmentTxId=1&jid=" + journalId));
     byte[] expected = Bytes.concat(
-        Ints.toByteArray(HdfsConstants.LAYOUT_VERSION),
+        Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),
         (new byte[] { 0, 0, 0, 0 }), // layout flags section
         EDITS_DATA);

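Note: the bytes asserted above are the header of a served edit segment: a 4-byte NameNode layout version, a 4-byte layout-flags word (all zeros here), then the raw edit records. A small sketch of parsing that header (the helper name and return shape are illustrative only; requires java.io.DataInputStream and java.io.ByteArrayInputStream):

    // Parse the 8-byte header that precedes the edit records.
    static int[] readEditSegmentHeader(byte[] segment) throws IOException {
      DataInputStream in = new DataInputStream(new ByteArrayInputStream(segment));
      int layoutVersion = in.readInt();  // HdfsConstants.NAMENODE_LAYOUT_VERSION
      int layoutFlags = in.readInt();    // 0 in this test
      return new int[] { layoutVersion, layoutFlags };
    }
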
@@ -67,7 +67,8 @@ public class TestDatanodeRegister {
     // Return a good software version.
     doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
     // Return a good layout version for now.
-    doReturn(HdfsConstants.LAYOUT_VERSION).when(fakeNsInfo).getLayoutVersion();
+    doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(fakeNsInfo)
+        .getLayoutVersion();

     DatanodeProtocolClientSideTranslatorPB fakeDnProt =
         mock(DatanodeProtocolClientSideTranslatorPB.class);

@@ -103,20 +104,17 @@ public class TestDatanodeRegister {
   @Test
   public void testDifferentLayoutVersions() throws Exception {
     // We expect no exceptions to be thrown when the layout versions match.
-    assertEquals(HdfsConstants.LAYOUT_VERSION,
+    assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,
         actor.retrieveNamespaceInfo().getLayoutVersion());

     // We expect an exception to be thrown when the NN reports a layout version
     // different from that of the DN.
-    doReturn(HdfsConstants.LAYOUT_VERSION * 1000).when(fakeNsInfo)
+    doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
         .getLayoutVersion();
     try {
       actor.retrieveNamespaceInfo();
-      fail("Should have failed to retrieve NS info from DN with bad layout version");
-    } catch (IncorrectVersionException ive) {
-      GenericTestUtils.assertExceptionContains(
-          "Unexpected version of namenode", ive);
-      LOG.info("Got expected exception", ive);
+    } catch (IOException e) {
+      fail("Should not fail to retrieve NS info from DN with different layout version");
     }
   }
 }

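Note: the rewritten testDifferentLayoutVersions captures the behavioral point of the split: a DataNode no longer refuses to register with a NameNode whose layout version differs from its own, because the two versions now evolve independently. The single HdfsConstants.LAYOUT_VERSION is replaced by two constants, presumably wired along these lines (the exact wiring is an assumption based on the names used throughout this diff):

    // Presumed shape of the split constants in HdfsConstants.
    public static final int NAMENODE_LAYOUT_VERSION =
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
    public static final int DATANODE_LAYOUT_VERSION =
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
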
@@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

@@ -1975,7 +1976,7 @@ public class TestCheckpoint {
         .when(dstImage).getFiles(
             Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());

-    Mockito.doReturn(new StorageInfo(1, 1, "X", 1).toColonSeparatedString())
+    Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString())
       .when(dstImage).toColonSeparatedString();

     try {

@@ -67,7 +67,7 @@ public class TestStartupOptionUpgrade {
    */
   @Test
   public void testStartupOptUpgradeFrom204() throws Exception {
-    layoutVersion = Feature.RESERVED_REL20_204.getLayoutVersion();
+    layoutVersion = Feature.RESERVED_REL20_204.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
     assertTrue("Clusterid should start with CID", storage.getClusterID()
         .startsWith("CID"));

@@ -83,7 +83,7 @@ public class TestStartupOptionUpgrade {
   @Test
   public void testStartupOptUpgradeFrom22WithCID() throws Exception {
     startOpt.setClusterId("cid");
-    layoutVersion = Feature.RESERVED_REL22.getLayoutVersion();
+    layoutVersion = Feature.RESERVED_REL22.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
     assertEquals("Clusterid should match with the given clusterid",
         "cid", storage.getClusterID());

@@ -101,7 +101,7 @@ public class TestStartupOptionUpgrade {
       throws Exception {
     // Test assumes clusterid already exists, set the clusterid
     storage.setClusterID("currentcid");
-    layoutVersion = Feature.FEDERATION.getLayoutVersion();
+    layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
     assertEquals("Clusterid should match with the existing one",
         "currentcid", storage.getClusterID());

@@ -119,7 +119,7 @@ public class TestStartupOptionUpgrade {
       throws Exception {
     startOpt.setClusterId("wrong-cid");
     storage.setClusterID("currentcid");
-    layoutVersion = Feature.FEDERATION.getLayoutVersion();
+    layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
     assertEquals("Clusterid should match with the existing one",
         "currentcid", storage.getClusterID());

@@ -137,7 +137,7 @@ public class TestStartupOptionUpgrade {
       throws Exception {
     startOpt.setClusterId("currentcid");
     storage.setClusterID("currentcid");
-    layoutVersion = Feature.FEDERATION.getLayoutVersion();
+    layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
     assertEquals("Clusterid should match with the existing one",
         "currentcid", storage.getClusterID());

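Note: all four TestStartupOptionUpgrade hunks are the same mechanical migration: Feature enum constants no longer expose getLayoutVersion() directly, so callers go through the FeatureInfo holder. The pattern, in one line:

    // Before: Feature.FEDERATION.getLayoutVersion();
    // After:  Feature.FEDERATION.getInfo().getLayoutVersion();
    int layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();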