HDFS-3137. svn merge -c 1307173 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1307175 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-03-30 00:14:13 +00:00
parent 07aead07d6
commit 2c04152f30
15 changed files with 75 additions and 724 deletions

View File

@@ -8,6 +8,8 @@ Release 2.0.0 - UNRELEASED
HDFS-2303. Unbundle jsvc. (Roman Shaposhnik and Mingjie Lai via eli)
HDFS-3137. Bump LAST_UPGRADABLE_LAYOUT_VERSION to -16. (eli)
NEW FEATURES
HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)

View File

@@ -440,23 +440,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
}
/** Serialization for FSEditLog */
public void readFieldsFromFSEditLog(DataInput in) throws IOException {
this.name = DeprecatedUTF8.readString(in);
this.storageID = DeprecatedUTF8.readString(in);
this.infoPort = in.readShort() & 0x0000ffff;
this.capacity = in.readLong();
this.dfsUsed = in.readLong();
this.remaining = in.readLong();
this.blockPoolUsed = in.readLong();
this.lastUpdate = in.readLong();
this.xceiverCount = in.readInt();
this.location = Text.readString(in);
this.hostName = Text.readString(in);
setAdminState(WritableUtils.readEnum(in, AdminStates.class));
}
/**
* @return Approximate number of blocks currently scheduled to be written
* to this datanode.

View File

@@ -64,18 +64,12 @@ import org.apache.hadoop.util.VersionInfo;
public abstract class Storage extends StorageInfo {
public static final Log LOG = LogFactory.getLog(Storage.class.getName());
// Constants
// last layout version that did not support upgrades
public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
// this corresponds to Hadoop-0.14.
public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";
/* this should be removed when LAST_UPGRADABLE_LV goes beyond -13.
* any upgrade code that uses this constant should also be removed. */
public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
// this corresponds to Hadoop-0.18
public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16;
protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18";
/** Layout versions of 0.20.203 release */
public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
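The constants above encode the policy change: HDFS layout versions are negative and grow more negative as the format evolves, so raising the floor to -16 rejects any storage written before Hadoop 0.18. A minimal sketch of the kind of check these constants back (the helper class, method and message are illustrative, not the Storage implementation):
// Sketch only: "too old to upgrade" means numerically greater than
// LAST_UPGRADABLE_LAYOUT_VERSION, since layout versions decrease over time.
class LayoutCheckSketch {
  static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16;              // Hadoop 0.18
  static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18";

  static void checkUpgradable(int storedLayoutVersion) throws java.io.IOException {
    if (storedLayoutVersion > LAST_UPGRADABLE_LAYOUT_VERSION) {
      throw new java.io.IOException("Layout version " + storedLayoutVersion
          + " predates " + LAST_UPGRADABLE_HADOOP_VERSION
          + "; upgrade through an intermediate release first.");
    }
  }
}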

View File

@@ -73,9 +73,6 @@ public class DataStorage extends Storage {
public final static String STORAGE_DIR_FINALIZED = "finalized";
public final static String STORAGE_DIR_TMP = "tmp";
private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN =
Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
/** Access to this variable is guarded by "this" */
private String storageID;
@@ -669,13 +666,6 @@ public class DataStorage extends Storage {
in.close();
}
} else {
//check if we are upgrading from pre-generation stamp version.
if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
// Link to the new file name.
to = new File(convertMetatadataFileName(to.getAbsolutePath()));
}
HardLink.createHardLink(from, to);
hl.linkStats.countSingleLinks++;
}
@@ -687,50 +677,32 @@ public class DataStorage extends Storage {
if (!to.mkdirs())
throw new IOException("Cannot create directory " + to);
//If upgrading from old stuff, need to munge the filenames. That has to
//be done one file at a time, so hardlink them one at a time (slow).
if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
String[] blockNames = from.list(new java.io.FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_SUBDIR_PREFIX)
|| name.startsWith(BLOCK_FILE_PREFIX)
|| name.startsWith(COPY_FILE_PREFIX);
}
});
if (blockNames.length == 0) {
hl.linkStats.countEmptyDirs++;
String[] blockNames = from.list(new java.io.FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_FILE_PREFIX);
}
else for(int i = 0; i < blockNames.length; i++)
linkBlocks(new File(from, blockNames[i]),
new File(to, blockNames[i]), oldLV, hl);
}
else {
//If upgrading from a relatively new version, we only need to create
//links with the same filename. This can be done in bulk (much faster).
String[] blockNames = from.list(new java.io.FilenameFilter() {
});
// Block files just need hard links with the same file names
// but a different directory
if (blockNames.length > 0) {
HardLink.createHardLinkMult(from, blockNames, to);
hl.linkStats.countMultLinks++;
hl.linkStats.countFilesMultLinks += blockNames.length;
} else {
hl.linkStats.countEmptyDirs++;
}
// Now take care of the rest of the files and subdirectories
String[] otherNames = from.list(new java.io.FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_FILE_PREFIX);
return name.startsWith(BLOCK_SUBDIR_PREFIX)
|| name.startsWith(COPY_FILE_PREFIX);
}
});
if (blockNames.length > 0) {
HardLink.createHardLinkMult(from, blockNames, to);
hl.linkStats.countMultLinks++;
hl.linkStats.countFilesMultLinks += blockNames.length;
} else {
hl.linkStats.countEmptyDirs++;
}
//now take care of the rest of the files and subdirectories
String[] otherNames = from.list(new java.io.FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_SUBDIR_PREFIX)
|| name.startsWith(COPY_FILE_PREFIX);
}
});
for(int i = 0; i < otherNames.length; i++)
linkBlocks(new File(from, otherNames[i]),
new File(to, otherNames[i]), oldLV, hl);
}
for(int i = 0; i < otherNames.length; i++)
linkBlocks(new File(from, otherNames[i]),
new File(to, otherNames[i]), oldLV, hl);
}
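With pre-0.18 layouts ruled out, the pre-generation-stamp rename path is gone and every block file keeps its name across an upgrade, so linking reduces to same-name hard links. A standalone JDK illustration of that simplified step (class and method names are assumptions, not the DataStorage code):
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

class SameNameLinker {
  // Hard-link every block (and block metadata) file from the previous storage
  // directory into the upgraded one, keeping the original file names.
  static int linkBlockFiles(Path from, Path to) throws IOException {
    int linked = 0;
    Files.createDirectories(to);
    try (DirectoryStream<Path> blocks = Files.newDirectoryStream(from, "blk_*")) {
      for (Path src : blocks) {
        Files.createLink(to.resolve(src.getFileName()), src);  // same name, no renaming
        linked++;
      }
    }
    return linked;
  }
}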
private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
@@ -741,22 +713,6 @@ public class DataStorage extends Storage {
um.initializeUpgrade(nsInfo);
}
/**
* This is invoked on target file names when upgrading from pre generation
* stamp version (version -13) to correct the metatadata file name.
* @param oldFileName
* @return the new metadata file name with the default generation stamp.
*/
private static String convertMetatadataFileName(String oldFileName) {
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
if (matcher.matches()) {
//return the current metadata file name
return DatanodeUtil.getMetaFileName(matcher.group(1),
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
}
return oldFileName;
}
/**
* Add bpStorage into bpStorageMap
*/

View File

@@ -64,7 +64,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Joiner;
@@ -233,37 +232,13 @@ public class FSEditLogLoader {
// get name and replication
final short replication = fsNamesys.getBlockManager(
).adjustReplication(addCloseOp.replication);
PermissionStatus permissions = fsNamesys.getUpgradePermission();
if (addCloseOp.permissions != null) {
permissions = addCloseOp.permissions;
}
long blockSize = addCloseOp.blockSize;
// Versions of HDFS prior to 0.17 may log an OP_ADD transaction
// which includes blocks in it. When we update the minimum
// upgrade version to something more recent than 0.17, we can
// simplify this code by asserting that OP_ADD transactions
// don't have any blocks.
// Older versions of HDFS do not store the block size in the inode.
// If the file has more than one block, use the size of the
// first block as the blocksize. Otherwise use the default
// block size.
if (-8 <= logVersion && blockSize == 0) {
if (addCloseOp.blocks.length > 1) {
blockSize = addCloseOp.blocks[0].getNumBytes();
} else {
long first = ((addCloseOp.blocks.length == 1)?
addCloseOp.blocks[0].getNumBytes(): 0);
blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
}
}
assert addCloseOp.blocks.length == 0;
// add to the file tree
newFile = (INodeFile)fsDir.unprotectedAddFile(
addCloseOp.path, permissions,
addCloseOp.path, addCloseOp.permissions,
replication, addCloseOp.mtime,
addCloseOp.atime, blockSize,
addCloseOp.atime, addCloseOp.blockSize,
true, addCloseOp.clientName, addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
@@ -375,12 +350,7 @@ public class FSEditLogLoader {
}
case OP_MKDIR: {
MkdirOp mkdirOp = (MkdirOp)op;
PermissionStatus permissions = fsNamesys.getUpgradePermission();
if (mkdirOp.permissions != null) {
permissions = mkdirOp.permissions;
}
fsDir.unprotectedMkdir(mkdirOp.path, permissions,
fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
mkdirOp.timestamp);
break;
}
@@ -495,9 +465,6 @@ public class FSEditLogLoader {
// no data in here currently.
break;
}
case OP_DATANODE_ADD:
case OP_DATANODE_REMOVE:
break;
default:
throw new IOException("Invalid operation read " + op.opCode);
}
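Taken together, the loader can now assume that every replayable OP_ADD carries explicit permissions and an explicit block size and embeds no block list, which is what the removed fallbacks used to compensate for. A compact sketch of that simplified replay path with local stand-in types (the real code uses FSEditLogOp.AddCloseOp and FSDirectory):
class OpAddReplaySketch {
  static final class AddOp {            // stand-in for AddCloseOp
    String path;
    short replication;
    long mtime, atime, blockSize;       // blockSize is always logged at layout <= -16
    Object permissions;                 // never null in loadable logs
    int blockCount;                     // OP_ADD no longer embeds blocks
  }

  interface Namespace {                 // stand-in for FSDirectory.unprotectedAddFile
    void addFile(String path, Object perms, short repl,
                 long mtime, long atime, long blockSize);
  }

  static void replay(AddOp op, Namespace ns) {
    assert op.blockCount == 0 : "OP_ADD carries no inline block list";
    ns.addFile(op.path, op.permissions, op.replication, op.mtime, op.atime, op.blockSize);
  }
}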

View File

@@ -30,11 +30,8 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.util.PureJavaCrc32;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
@@ -81,8 +78,6 @@ public abstract class FSEditLogOp {
instances.put(OP_DELETE, new DeleteOp());
instances.put(OP_MKDIR, new MkdirOp());
instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
instances.put(OP_DATANODE_ADD, new DatanodeAddOp());
instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
instances.put(OP_SET_OWNER, new SetOwnerOp());
instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
@@ -147,7 +142,6 @@ public abstract class FSEditLogOp {
PermissionStatus permissions;
String clientName;
String clientMachine;
//final DatanodeDescriptor[] dataNodeDescriptors; UNUSED
private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
@@ -226,13 +220,10 @@ public abstract class FSEditLogOp {
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
// versions > 0 support per file replication
// get name and replication
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
if (-7 == logVersion && length != 3||
-17 < logVersion && logVersion < -7 && length != 4 ||
if ((-17 < logVersion && length != 4) ||
(logVersion <= -17 && length != 5 && !LayoutVersion.supports(
Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
throw new IOException("Incorrect data format." +
@@ -259,49 +250,26 @@ public abstract class FSEditLogOp {
} else {
this.atime = 0;
}
if (logVersion < -7) {
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.blockSize = FSImageSerialization.readLong(in);
} else {
this.blockSize = readLong(in);
}
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.blockSize = FSImageSerialization.readLong(in);
} else {
this.blockSize = 0;
this.blockSize = readLong(in);
}
// get blocks
this.blocks = readBlocks(in, logVersion);
if (logVersion <= -11) {
this.permissions = PermissionStatus.read(in);
} else {
this.permissions = null;
}
this.permissions = PermissionStatus.read(in);
// clientname, clientMachine and block locations of last block.
if (this.opCode == OP_ADD && logVersion <= -12) {
if (this.opCode == OP_ADD) {
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
if (-13 <= logVersion) {
readDatanodeDescriptorArray(in);
}
} else {
this.clientName = "";
this.clientMachine = "";
}
}
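The rewritten length check above is easier to follow with its thresholds spelled out; a commented restatement of my reading of it (illustrative only, not the FSEditLogOp code):
class AddCloseLengthSketch {
  // logVersion is negative and decreases as the format evolves, so "-17 < logVersion"
  // matches only the oldest still-loadable layout (-16).
  static boolean lengthLooksValid(int logVersion, int length, boolean opOptimization) {
    if (opOptimization) {
      return true;        // optimized ops stopped writing a length field altogether
    }
    if (-17 < logVersion) {
      return length == 4; // layout -16: path, replication, mtime, blockSize
    }
    return length == 5;   // layout <= -17: access time added as a fifth field
  }
}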
/** This method is defined for compatibility reason. */
private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in)
throws IOException {
DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()];
for (int i = 0; i < locations.length; i++) {
locations[i] = new DatanodeDescriptor();
locations[i].readFieldsFromFSEditLog(in);
}
return locations;
}
private static Block[] readBlocks(
DataInputStream in,
int logVersion) throws IOException {
@@ -309,14 +277,7 @@ public abstract class FSEditLogOp {
Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) {
Block blk = new Block();
if (logVersion <= -14) {
blk.readFields(in);
} else {
BlockTwo oldblk = new BlockTwo();
oldblk.readFields(in);
blk.set(oldblk.blkid, oldblk.len,
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
}
blk.readFields(in);
blocks[i] = blk;
}
return blocks;
@@ -788,17 +749,14 @@ public abstract class FSEditLogOp {
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
void readFields(DataInputStream in, int logVersion) throws IOException {
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
if (-17 < logVersion && length != 2 ||
logVersion <= -17 && length != 3
&& !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
throw new IOException("Incorrect data format. "
+ "Mkdir operation.");
throw new IOException("Incorrect data format. Mkdir operation.");
}
this.path = FSImageSerialization.readString(in);
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
@@ -811,7 +769,6 @@ public abstract class FSEditLogOp {
// However, currently this is not being updated/used because of
// performance reasons.
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
/* unused this.atime = */
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
FSImageSerialization.readLong(in);
} else {
@@ -819,11 +776,7 @@ public abstract class FSEditLogOp {
}
}
if (logVersion <= -11) {
this.permissions = PermissionStatus.read(in);
} else {
this.permissions = null;
}
this.permissions = PermissionStatus.read(in);
}
@Override
@@ -888,77 +841,6 @@ public abstract class FSEditLogOp {
}
}
@SuppressWarnings("deprecation")
static class DatanodeAddOp extends FSEditLogOp {
private DatanodeAddOp() {
super(OP_DATANODE_ADD);
}
static DatanodeAddOp getInstance() {
return (DatanodeAddOp)opInstances.get()
.get(OP_DATANODE_ADD);
}
@Override
void writeFields(DataOutputStream out) throws IOException {
throw new IOException("Deprecated, should not write");
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
//Datanodes are not persistent any more.
FSImageSerialization.DatanodeImage.skipOne(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DatanodeAddOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
@SuppressWarnings("deprecation")
static class DatanodeRemoveOp extends FSEditLogOp {
private DatanodeRemoveOp() {
super(OP_DATANODE_REMOVE);
}
static DatanodeRemoveOp getInstance() {
return (DatanodeRemoveOp)opInstances.get()
.get(OP_DATANODE_REMOVE);
}
@Override
void writeFields(DataOutputStream out) throws IOException {
throw new IOException("Deprecated, should not write");
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
DatanodeID nodeID = new DatanodeID();
nodeID.readFields(in);
//Datanodes are not persistent any more.
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DatanodeRemoveOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
static class SetPermissionsOp extends FSEditLogOp {
String src;
FsPermission permissions;

View File

@@ -36,8 +36,8 @@ public enum FSEditLogOpCodes {
OP_DELETE ((byte) 2),
OP_MKDIR ((byte) 3),
OP_SET_REPLICATION ((byte) 4),
@Deprecated OP_DATANODE_ADD ((byte) 5),
@Deprecated OP_DATANODE_REMOVE((byte) 6),
@Deprecated OP_DATANODE_ADD ((byte) 5), // obsolete
@Deprecated OP_DATANODE_REMOVE((byte) 6), // obsolete
OP_SET_PERMISSIONS ((byte) 7),
OP_SET_OWNER ((byte) 8),
OP_CLOSE ((byte) 9),

View File

@@ -131,34 +131,22 @@ class FSImageFormat {
DataInputStream in = new DataInputStream(fin);
try {
/*
* Note: Remove any checks for version earlier than
* Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get
* to here with older images.
*/
/*
* TODO we need to change format of the image file
* it should not contain version and namespace fields
*/
// read image version: first appeared in version -1
int imgVersion = in.readInt();
if(getLayoutVersion() != imgVersion)
if (getLayoutVersion() != imgVersion) {
throw new InconsistentFSStateException(curFile,
"imgVersion " + imgVersion +
" expected to be " + getLayoutVersion());
}
// read namespaceID: first appeared in version -2
in.readInt();
// read number of files
long numFiles = readNumFiles(in);
long numFiles = in.readLong();
// read in the last generation stamp.
if (imgVersion <= -12) {
long genstamp = in.readLong();
namesystem.setGenerationStamp(genstamp);
}
long genstamp = in.readLong();
namesystem.setGenerationStamp(genstamp);
// read the transaction ID of the last edit represented by
// this image
@@ -167,7 +155,6 @@ class FSImageFormat {
} else {
imgTxId = 0;
}
// read compression related info
FSImageCompression compression;
@@ -189,13 +176,9 @@ class FSImageFormat {
loadFullNameINodes(numFiles, in);
}
// load datanode info
this.loadDatanodes(in);
loadFilesUnderConstruction(in);
// load Files Under Construction
this.loadFilesUnderConstruction(in);
this.loadSecretManagerState(in);
loadSecretManagerState(in);
// make sure to read to the end of file
int eof = in.read();
@@ -335,89 +318,44 @@ class FSImageFormat {
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
atime = in.readLong();
}
if (imgVersion <= -8) {
blockSize = in.readLong();
}
blockSize = in.readLong();
int numBlocks = in.readInt();
BlockInfo blocks[] = null;
// for older versions, a blocklist of size 0
// indicates a directory.
if ((-9 <= imgVersion && numBlocks > 0) ||
(imgVersion < -9 && numBlocks >= 0)) {
if (numBlocks >= 0) {
blocks = new BlockInfo[numBlocks];
for (int j = 0; j < numBlocks; j++) {
blocks[j] = new BlockInfo(replication);
if (-14 < imgVersion) {
blocks[j].set(in.readLong(), in.readLong(),
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
} else {
blocks[j].readFields(in);
}
}
}
// Older versions of HDFS do not store the block size in the inode.
// If the file has more than one block, use the size of the
// first block as the blocksize. Otherwise use the default block size.
//
if (-8 <= imgVersion && blockSize == 0) {
if (numBlocks > 1) {
blockSize = blocks[0].getNumBytes();
} else {
long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
blockSize = Math.max(namesystem.getDefaultBlockSize(), first);
blocks[j].readFields(in);
}
}
// get quota only when the node is a directory
long nsQuota = -1L;
if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
&& blocks == null && numBlocks == -1) {
nsQuota = in.readLong();
}
long dsQuota = -1L;
if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
&& blocks == null && numBlocks == -1) {
dsQuota = in.readLong();
}
// Read the symlink only when the node is a symlink
String symlink = "";
if (numBlocks == -2) {
symlink = Text.readString(in);
}
PermissionStatus permissions = namesystem.getUpgradePermission();
if (imgVersion <= -11) {
permissions = PermissionStatus.read(in);
}
return INode.newINode(permissions, blocks, symlink, replication,
modificationTime, atime, nsQuota, dsQuota, blockSize);
if (blocks == null && numBlocks == -1) {
nsQuota = in.readLong();
}
long dsQuota = -1L;
if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
&& blocks == null && numBlocks == -1) {
dsQuota = in.readLong();
}
private void loadDatanodes(DataInputStream in)
throws IOException {
int imgVersion = getLayoutVersion();
if (imgVersion > -3) // pre datanode image version
return;
if (imgVersion <= -12) {
return; // new versions do not store the datanodes any more.
}
int size = in.readInt();
for(int i = 0; i < size; i++) {
// We don't need to add these descriptors any more.
FSImageSerialization.DatanodeImage.skipOne(in);
}
// Read the symlink only when the node is a symlink
String symlink = "";
if (numBlocks == -2) {
symlink = Text.readString(in);
}
PermissionStatus permissions = PermissionStatus.read(in);
return INode.newINode(permissions, blocks, symlink, replication,
modificationTime, atime, nsQuota, dsQuota, blockSize);
}
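For orientation, here is the per-inode record layout the simplified reader now expects, as I reconstruct it from the diff (a sketch with plain JDK types; the real loadINode uses BlockInfo, PermissionStatus and the quota/symlink handling shown above):
import java.io.DataInputStream;
import java.io.IOException;

class INodeRecordSketch {
  // The inode's path name is read by the caller before this record.
  static void readOne(DataInputStream in, boolean hasAccessTime) throws IOException {
    short replication = in.readShort();                 // replication factor
    long mtime = in.readLong();                         // modification time
    long atime = hasAccessTime ? in.readLong() : 0;     // present at layout <= -17
    long blockSize = in.readLong();                     // always stored now
    int numBlocks = in.readInt();                       // >= 0 file, -1 directory, -2 symlink
    for (int i = 0; i < Math.max(numBlocks, 0); i++) {
      in.readLong(); in.readLong(); in.readLong();      // block id, length, generation stamp
    }
    // Directories (-1) then carry nsQuota and, where supported, dsQuota; symlinks (-2)
    // carry a target string; every inode ends with its serialized PermissionStatus.
  }
}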
private void loadFilesUnderConstruction(DataInputStream in)
throws IOException {
FSDirectory fsDir = namesystem.dir;
int imgVersion = getLayoutVersion();
if (imgVersion > -13) // pre lease image version
return;
int size = in.readInt();
LOG.info("Number of files under construction = " + size);
@@ -457,17 +395,6 @@ class FSImageFormat {
return namesystem.getFSImage().getStorage().getLayoutVersion();
}
private long readNumFiles(DataInputStream in)
throws IOException {
int imgVersion = getLayoutVersion();
if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
return in.readLong();
} else {
return in.readInt();
}
}
private boolean isRoot(byte[][] path) {
return path.length == 1 &&
path[0] == null;

View File

@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
@@ -31,7 +29,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -39,7 +36,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
@@ -107,13 +103,10 @@ public class FSImageSerialization {
String clientName = readString(in);
String clientMachine = readString(in);
// These locations are not used at all
// We previously stored locations for the last block, now we
// just record that there are none
int numLocs = in.readInt();
DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
for (i = 0; i < numLocs; i++) {
locations[i] = new DatanodeDescriptor();
locations[i].readFields(in);
}
assert numLocs == 0 : "Unexpected block locations";
return new INodeFileUnderConstruction(name,
blockReplication,
@@ -320,53 +313,4 @@ public class FSImageSerialization {
}
return ret;
}
/**
* DatanodeImage is used to store persistent information
* about datanodes into the fsImage.
*/
static class DatanodeImage implements Writable {
DatanodeDescriptor node = new DatanodeDescriptor();
static void skipOne(DataInput in) throws IOException {
DatanodeImage nodeImage = new DatanodeImage();
nodeImage.readFields(in);
}
/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
/**
* Public method that serializes the information about a
* Datanode to be stored in the fsImage.
*/
public void write(DataOutput out) throws IOException {
new DatanodeID(node).write(out);
out.writeLong(node.getCapacity());
out.writeLong(node.getRemaining());
out.writeLong(node.getLastUpdate());
out.writeInt(node.getXceiverCount());
}
/**
* Public method that reads a serialized Datanode
* from the fsImage.
*/
public void readFields(DataInput in) throws IOException {
DatanodeID id = new DatanodeID();
id.readFields(in);
long capacity = in.readLong();
long remaining = in.readLong();
long lastUpdate = in.readLong();
int xceiverCount = in.readInt();
// update the DatanodeDescriptor with the data we read in
node.updateRegInfo(id);
node.setStorageID(id.getStorageID());
node.setCapacity(capacity);
node.setRemaining(remaining);
node.setLastUpdate(lastUpdate);
node.setXceiverCount(xceiverCount);
}
}
}

View File

@@ -877,14 +877,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
}
/**
* Return the default path permission when upgrading from releases with no
* permissions (<=0.15) to releases with permissions (>=0.16)
*/
protected PermissionStatus getUpgradePermission() {
return defaultPermission;
}
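With the upgrade default gone, permissions are always materialized from the serialized PermissionStatus in the image or edit log; a one-line sketch of the pattern (the helper class is hypothetical, but PermissionStatus.read is the call used throughout this diff):
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.fs.permission.PermissionStatus;

class PermissionLoadSketch {
  static PermissionStatus loadPermissions(DataInput in) throws IOException {
    return PermissionStatus.read(in);   // no fallback to a cluster-wide default anymore
  }
}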
NamespaceInfo getNamespaceInfo() {
readLock();
try {

View File

@@ -52,7 +52,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
.getLog(TestDFSUpgradeFromImage.class);
private static File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
@@ -68,10 +67,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
boolean printChecksum = false;
public void unpackStorage() throws IOException {
unpackStorage(HADOOP14_IMAGE);
}
private void unpackStorage(String tarFileName)
throws IOException {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
@@ -227,14 +222,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
}
}
/**
* Test upgrade from an 0.14 image
*/
public void testUpgradeFromRel14Image() throws IOException {
unpackStorage();
upgradeAndVerify();
}
/**
* Test upgrade from 0.22 image
*/
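The surviving upgrade tests keep the same unpack-then-verify shape against images that are still upgradable; a sketch of what such a test method would look like inside this class (the method name is an assumption, mirroring the removed 0.14 test, while HADOOP22_IMAGE, unpackStorage and upgradeAndVerify are the members shown in this diff):
public void testUpgradeFromRel22Image() throws IOException {
  unpackStorage(HADOOP22_IMAGE);   // unpack the bundled 0.22 storage layout
  upgradeAndVerify();              // start with -upgrade and check file checksums
}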

View File

@@ -1,267 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.LAYOUT_VERSION;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;
import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import static org.junit.Assert.*;
/**
*/
public class TestDistributedUpgrade {
private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class);
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
private String clusterId = "testClsterId";
/**
* Writes an INFO log message containing the parameters.
*/
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
+ label + ":"
+ " numDirs="+numDirs);
}
/**
* Attempts to start a NameNode with the given operation. Starting
* the NameNode should throw an exception.
*/
void startNameNodeShouldFail(StartupOption operation,
String exceptionSubstring) {
try {
//cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
// we set manage dirs to true as NN has to start from untar'ed image with
// nn dirs set to name1 and name2
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.clusterId(clusterId)
.startupOption(operation)
.build(); // should fail
throw new AssertionError("NameNode should have failed to start");
} catch (Exception expected) {
GenericTestUtils.assertExceptionContains(
exceptionSubstring, expected);
}
}
/**
* Attempts to start a DataNode with the given operation. Starting
* the DataNode should throw an exception.
*/
void startDataNodeShouldFail(StartupOption operation) {
try {
cluster.startDataNodes(conf, 1, false, operation, null); // should fail
throw new AssertionError("DataNode should have failed to start");
} catch (Exception expected) {
// expected
assertFalse(cluster.isDataNodeUp());
}
}
/**
*/
@Test(timeout=300000) // 5 min timeout
public void testDistributedUpgrade() throws Exception {
int numDirs = 1;
TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
testImg.unpackStorage();
int numDNs = testImg.numDataNodes;
// register new upgrade objects (ignore all existing)
UpgradeObjectCollection.initialize();
UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
conf = new HdfsConfiguration();
if (System.getProperty("test.build.data") == null) { // allow the test to be run outside of ant
System.setProperty("test.build.data", "build/test/data");
}
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
startNameNodeShouldFail(StartupOption.REGULAR, "contains an old layout version");
log("Start NameNode only distributed upgrade", numDirs);
// cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
// .startupOption(StartupOption.UPGRADE).build();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.clusterId(clusterId)
.startupOption(StartupOption.UPGRADE)
.build();
cluster.shutdown();
log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
startNameNodeShouldFail(StartupOption.REGULAR,
"Previous distributed upgrade was not completed");
log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
startNameNodeShouldFail(StartupOption.ROLLBACK,
"Cannot rollback to storage version -7 using this version");
log("Normal distributed upgrade for the cluster", numDirs);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDNs)
.format(false)
.clusterId(clusterId)
.startupOption(StartupOption.UPGRADE)
.build();
DFSAdmin dfsAdmin = new DFSAdmin();
dfsAdmin.setConf(conf);
dfsAdmin.run(new String[] {"-safemode", "wait"});
cluster.shutdown();
// it should be ok to start in regular mode
log("NameCluster regular startup after the upgrade", numDirs);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDNs)
.clusterId(clusterId)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
cluster.waitActive();
cluster.shutdown();
}
public static void main(String[] args) throws Exception {
new TestDistributedUpgrade().testDistributedUpgrade();
LOG.info("=== DONE ===");
}
}
/**
* Upgrade object for data-node
*/
class UO_Datanode extends UpgradeObjectDatanode {
int version;
UO_Datanode(int v) {
this.status = (short)0;
version = v;
}
public int getVersion() {
return version;
}
public void doUpgrade() throws IOException {
this.status = (short)100;
DatanodeProtocol nn = getNamenode();
nn.processUpgradeCommand(
new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS,
getVersion(), getUpgradeStatus()));
}
public UpgradeCommand startUpgrade() throws IOException {
return null;
}
}
/**
* Upgrade object for name-node
*/
class UO_Namenode extends UpgradeObjectNamenode {
int version;
UO_Namenode(int v) {
status = (short)0;
version = v;
}
public int getVersion() {
return version;
}
synchronized public UpgradeCommand processUpgradeCommand(
UpgradeCommand command) throws IOException {
switch(command.getAction()) {
case UpgradeCommand.UC_ACTION_REPORT_STATUS:
this.status += command.getCurrentStatus()/8; // 4 reports needed
break;
default:
this.status++;
}
return null;
}
public UpgradeCommand completeUpgrade() throws IOException {
return null;
}
}
class UO_Datanode1 extends UO_Datanode {
UO_Datanode1() {
super(LAYOUT_VERSION+1);
}
}
class UO_Namenode1 extends UO_Namenode {
UO_Namenode1() {
super(LAYOUT_VERSION+1);
}
}
class UO_Datanode2 extends UO_Datanode {
UO_Datanode2() {
super(LAYOUT_VERSION+2);
}
}
class UO_Namenode2 extends UO_Namenode {
UO_Namenode2() {
super(LAYOUT_VERSION+2);
}
}
class UO_Datanode3 extends UO_Datanode {
UO_Datanode3() {
super(LAYOUT_VERSION+3);
}
}
class UO_Namenode3 extends UO_Namenode {
UO_Namenode3() {
super(LAYOUT_VERSION+3);
}
}

View File

@@ -65,12 +65,11 @@ public class TestOfflineEditsViewer {
*
* These are the opcodes that are not used anymore, some
* are marked deprecated, we need to include them here to make
* sure we exclude them when checking for completness of testing,
* sure we exclude them when checking for completeness of testing,
* that's why the "deprecation" warnings are suppressed.
*/
@SuppressWarnings("deprecation")
private static void initializeObsoleteOpCodes() {
// these are obsolete
obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_ADD, true);
obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_REMOVE, true);
obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_NS_QUOTA, true);

View File

@@ -19,18 +19,6 @@
# See HADOOP-1629 for more info if needed.
# These two files are used by unit test TestDFSUpgradeFromImage.java
#
# hadoop-14-dfs-dir.tgz :
# ---------------------
# This file contains the HDFS directory structure for one namenode and 4 datanodes.
# The structure is setup similar to the structure used in MiniDFSCluster.
# The directory was created with Hadoop-0.14.x.
#
# In the test, this directory is unpacked and MiniDFSCluster is run with
# "-upgrade" option. The test waits for the upgrade to complete
# (leave safe mode) and then all the files are read. The test checks that the
# directory structure and file checksums exactly match the information
# in this file.
#
# hadoop-dfs-dir.txt :
# ---------------------
# Along with this description this file contains the expected files and
@@ -43,9 +31,6 @@
# For e.g. "top-dir-1Mb-512" contains files created with dfs.blocksize of 1Mb
# and io.bytes.per.checksum of 512.
#
# In the future, when Hadoop project no longer supports upgrade from
# Hadoop-0.12, then a new DFS directory image must be created.
#
# To generate checksum info for new files :
# ---------------------------------------
# Uncomment the last comment (starts with "printChecksums") and run the