HDFS-6072. Clean up dead code of FSImage. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1576513 13f79535-47bb-0310-9956-ffa450edef68
Author: Haohui Mai
Date:   2014-03-11 21:44:38 +00:00
Parent: 8d29768704
Commit: b027ef8858

12 changed files with 30 additions and 952 deletions

CHANGES.txt

@@ -401,6 +401,8 @@ Release 2.4.0 - UNRELEASED
HDFS-6085. Improve CacheReplicationMonitor log messages a bit (cmccabe)
HDFS-6072. Clean up dead code of FSImage. (wheat9)
OPTIMIZATIONS
HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

DelegationTokenSecretManager.java

@@ -18,16 +18,9 @@
package org.apache.hadoop.hdfs.security.token.delegation;
import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -50,9 +43,13 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import java.io.DataInput;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
/**
* An HDFS-specific delegation token secret manager.
@@ -214,18 +211,6 @@ public class DelegationTokenSecretManager
}
}
/**
* Store the current state of the SecretManager for persistence
*
* @param out Output stream for writing into fsimage.
* @param sdPath String storage directory path
* @throws IOException
*/
public synchronized void saveSecretManagerStateCompat(DataOutputStream out,
String sdPath) throws IOException {
serializerCompat.save(out, sdPath);
}
public synchronized SecretManagerState saveSecretManagerState() {
SecretManagerSection s = SecretManagerSection.newBuilder()
.setCurrentId(currentId)
@@ -421,56 +406,6 @@ public class DelegationTokenSecretManager
loadCurrentTokens(in);
}
private void save(DataOutputStream out, String sdPath) throws IOException {
out.writeInt(currentId);
saveAllKeys(out, sdPath);
out.writeInt(delegationTokenSequenceNumber);
saveCurrentTokens(out, sdPath);
}
/**
* Private helper methods to save delegation keys and tokens in fsimage
*/
private synchronized void saveCurrentTokens(DataOutputStream out,
String sdPath) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(currentTokens.size());
Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
.iterator();
while (iter.hasNext()) {
DelegationTokenIdentifier id = iter.next();
id.write(out);
DelegationTokenInformation info = currentTokens.get(id);
out.writeLong(info.getRenewDate());
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/*
* Save the current state of allKeys
*/
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(allKeys.size());
Iterator<Integer> iter = allKeys.keySet().iterator();
while (iter.hasNext()) {
Integer key = iter.next();
allKeys.get(key).write(out);
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/**
* Private helper methods to load Delegation tokens from fsimage
*/

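For orientation, the deleted save() path above wrote a fixed-order, count-prefixed record layout that the protobuf-based saveSecretManagerState() replaces. The following standalone sketch reproduces that field order with plain JDK streams; the byte[] payloads are hypothetical stand-ins for the DelegationKey and DelegationTokenIdentifier records the real code emitted via their write(out) methods.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;

class LegacySecretManagerLayoutSketch {
  // Field order taken from the removed save()/saveAllKeys()/saveCurrentTokens():
  //   currentId: int
  //   key count: int, then each serialized DelegationKey
  //   delegationTokenSequenceNumber: int
  //   token count: int, then each (serialized identifier, renewDate: long)
  static byte[] save(int currentId, int sequenceNumber,
                     Map<Integer, byte[]> allKeys,
                     Map<byte[], Long> tokenRenewDates) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);
    out.writeInt(currentId);
    out.writeInt(allKeys.size());
    for (byte[] key : allKeys.values()) {
      out.write(key);                       // stand-in for key.write(out)
    }
    out.writeInt(sequenceNumber);
    out.writeInt(tokenRenewDates.size());
    for (Map.Entry<byte[], Long> e : tokenRenewDates.entrySet()) {
      out.write(e.getKey());                // stand-in for id.write(out)
      out.writeLong(e.getValue());
    }
    out.flush();
    return buffer.toByteArray();
  }
}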
CacheManager.java

@@ -27,7 +27,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -62,10 +61,10 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
@@ -952,18 +951,6 @@ public final class CacheManager {
}
}
/**
* Saves the current state of the CacheManager to the DataOutput. Used
* to persist CacheManager state in the FSImage.
* @param out DataOutput to persist state
* @param sdPath path of the storage directory
* @throws IOException
*/
public void saveStateCompat(DataOutputStream out, String sdPath)
throws IOException {
serializerCompat.save(out, sdPath);
}
public PersistState saveState() throws IOException {
ArrayList<CachePoolInfoProto> pools = Lists
.newArrayListWithCapacity(cachePools.size());
@@ -1083,12 +1070,6 @@ public final class CacheManager {
}
private final class SerializerCompat {
private void save(DataOutputStream out, String sdPath) throws IOException {
out.writeLong(nextDirectiveId);
savePools(out, sdPath);
saveDirectives(out, sdPath);
}
private void load(DataInput in) throws IOException {
nextDirectiveId = in.readLong();
// pools need to be loaded first since directives point to their parent pool
@@ -1096,42 +1077,6 @@ public final class CacheManager {
loadDirectives(in);
}
/**
* Save cache pools to fsimage
*/
private void savePools(DataOutputStream out,
String sdPath) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.CACHE_POOLS, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(cachePools.size());
for (CachePool pool: cachePools.values()) {
FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/*
* Save cache entries to fsimage
*/
private void saveDirectives(DataOutputStream out, String sdPath)
throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(directivesById.size());
for (CacheDirective directive : directivesById.values()) {
FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/**
* Load cache pools from fsimage
*/

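The removed savePools() and saveDirectives() above, like the secret-manager serializers earlier in this commit, wrap every write loop in the same startup-progress bookkeeping: begin a step, declare a total, increment a counter per record, end the step. Below is a simplified, self-contained analogue of that pattern; the real StartupProgress, Step, and Counter types live in org.apache.hadoop.hdfs.server.namenode.startupprogress.

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class ProgressStepSketch {
  private volatile long total;
  private final AtomicLong count = new AtomicLong();
  private volatile boolean ended;

  void begin(long total) { this.total = total; }      // prog.beginStep + setTotal
  void increment()       { count.incrementAndGet(); } // counter.increment()
  void end()             { ended = true; }            // prog.endStep
  long getCount()        { return count.get(); }

  // The shape of each removed serializer: declare the total up front,
  // tick the counter once per record, close the step afterwards.
  static <T> void writeAll(List<T> records, ProgressStepSketch step) {
    step.begin(records.size());
    for (T record : records) {
      // ... serialize one record here ...
      step.increment();
    }
    step.end();
  }
}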
FSImageFormat.java

@@ -21,20 +21,14 @@ import static org.apache.hadoop.util.Time.now;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.DigestInputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
@@ -56,7 +50,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -67,7 +60,6 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
@@ -77,105 +69,8 @@ import com.google.common.base.Preconditions;
import com.google.common.annotations.VisibleForTesting;
/**
* Contains inner classes for reading or writing the on-disk format for
* FSImages.
*
* In particular, the format of the FSImage looks like:
* <pre>
* FSImage {
* layoutVersion: int, namespaceID: int, numberItemsInFSDirectoryTree: long,
* namesystemGenerationStampV1: long, namesystemGenerationStampV2: long,
* generationStampAtBlockIdSwitch:long, lastAllocatedBlockId:
* long transactionID: long, snapshotCounter: int, numberOfSnapshots: int,
* numOfSnapshottableDirs: int,
* {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
* }
*
* FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported) {
* INodeInfo of root, numberOfChildren of root: int
* [list of INodeInfo of root's children],
* [list of INodeDirectoryInfo of root's directory children]
* }
*
* FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} not supported){
* [list of INodeInfo of INodes in topological order]
* }
*
* INodeInfo {
* {
* localName: short + byte[]
* } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported
* or
* {
* fullPath: byte[]
* } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
* replicationFactor: short, modificationTime: long,
* accessTime: long, preferredBlockSize: long,
* numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
* {
* nsQuota: long, dsQuota: long,
* {
* isINodeSnapshottable: byte,
* isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
* } (when {@link Feature#SNAPSHOT} is supported),
* fsPermission: short, PermissionStatus
* } for INodeDirectory
* or
* {
* symlinkString, fsPermission: short, PermissionStatus
* } for INodeSymlink
* or
* {
* [list of BlockInfo]
* [list of FileDiff]
* {
* isINodeFileUnderConstructionSnapshot: byte,
* {clientName: short + byte[], clientMachine: short + byte[]} (when
* isINodeFileUnderConstructionSnapshot is true),
* } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode),
* fsPermission: short, PermissionStatus
* } for INodeFile
* }
*
* INodeDirectoryInfo {
* fullPath of the directory: short + byte[],
* numberOfChildren: int, [list of INodeInfo of children INode],
* {
* numberOfSnapshots: int,
* [list of Snapshot] (when NumberOfSnapshots is positive),
* numberOfDirectoryDiffs: int,
* [list of DirectoryDiff] (NumberOfDirectoryDiffs is positive),
* number of children that are directories,
* [list of INodeDirectoryInfo of the directory children] (includes
* snapshot copies of deleted sub-directories)
* } (when {@link Feature#SNAPSHOT} is supported),
* }
*
* Snapshot {
* snapshotID: int, root of Snapshot: INodeDirectoryInfo (its local name is
* the name of the snapshot)
* }
*
* DirectoryDiff {
* full path of the root of the associated Snapshot: short + byte[],
* childrenSize: int,
* isSnapshotRoot: byte,
* snapshotINodeIsNotNull: byte (when isSnapshotRoot is false),
* snapshotINode: INodeDirectory (when SnapshotINodeIsNotNull is true), Diff
* }
*
* Diff {
* createdListSize: int, [Local name of INode in created list],
* deletedListSize: int, [INode in deleted list: INodeInfo]
* }
*
* FileDiff {
* full path of the root of the associated Snapshot: short + byte[],
* fileSize: long,
* snapshotINodeIsNotNull: byte,
* snapshotINode: INodeFile (when SnapshotINodeIsNotNull is true), Diff
* }
* </pre>
* This class loads and stores the FSImage of the NameNode. The file
* src/main/proto/fsimage.proto describes the on-disk layout of the FSImage.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -683,11 +578,6 @@ public class FSImageFormat {
}
}
/** @return The FSDirectory of the namesystem where the fsimage is loaded */
public FSDirectory getFSDirectoryInLoading() {
return namesystem.dir;
}
public INode loadINodeWithLocalName(boolean isSnapshotINode, DataInput in,
boolean updateINodeMap) throws IOException {
return loadINodeWithLocalName(isSnapshotINode, in, updateINodeMap, null);
@@ -1117,7 +1007,7 @@ public class FSImageFormat {
+ " option to automatically rename these paths during upgrade.";
/**
* Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
* Same as {@link #renameReservedPathsOnUpgrade}, but for a single
* byte array path component.
*/
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
@@ -1138,7 +1028,7 @@
}
/**
* Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
* Same as {@link #renameReservedPathsOnUpgrade}, but for a single
* byte array path component.
*/
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
@@ -1160,266 +1050,4 @@ public class FSImageFormat {
}
return component;
}
/**
* A one-shot class responsible for writing an image file.
* The save() function should be called once, after which the getter
* functions may be used to retrieve information about the file that was written.
*/
static class Saver {
private final SaveNamespaceContext context;
/** Set to true once an image has been written */
private boolean saved = false;
/** The MD5 checksum of the file that was written */
private MD5Hash savedDigest;
private final ReferenceMap referenceMap = new ReferenceMap();
private final Map<Long, INodeFile> snapshotUCMap =
new HashMap<Long, INodeFile>();
/** @throws IllegalStateException if the instance has not yet saved an image */
private void checkSaved() {
if (!saved) {
throw new IllegalStateException("FSImageSaver has not saved an image");
}
}
/** @throws IllegalStateException if the instance has already saved an image */
private void checkNotSaved() {
if (saved) {
throw new IllegalStateException("FSImageSaver has already saved an image");
}
}
Saver(SaveNamespaceContext context) {
this.context = context;
}
/**
* Return the MD5 checksum of the image file that was saved.
*/
MD5Hash getSavedDigest() {
checkSaved();
return savedDigest;
}
void save(File newFile, FSImageCompression compression) throws IOException {
checkNotSaved();
final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
final long numINodes = rootDir.getDirectoryWithQuotaFeature()
.getSpaceConsumed().get(Quota.NAMESPACE);
String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
Step step = new Step(StepType.INODES, sdPath);
StartupProgress prog = NameNode.getStartupProgress();
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
long startTime = now();
//
// Write out data
//
MessageDigest digester = MD5Hash.getDigester();
FileOutputStream fout = new FileOutputStream(newFile);
DigestOutputStream fos = new DigestOutputStream(fout, digester);
DataOutputStream out = new DataOutputStream(fos);
try {
out.writeInt(HdfsConstants.NAMENODE_LAYOUT_VERSION);
LayoutFlags.write(out);
// We use the non-locked version of getNamespaceInfo here since
// the coordinating thread of saveNamespace already has read-locked
// the namespace for us. If we attempt to take another readlock
// from the actual saver thread, there's a potential of a
// fairness-related deadlock. See the comments on HDFS-2223.
out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
.getNamespaceID());
out.writeLong(numINodes);
out.writeLong(sourceNamesystem.getGenerationStampV1());
out.writeLong(sourceNamesystem.getGenerationStampV2());
out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
out.writeLong(context.getTxId());
out.writeLong(sourceNamesystem.getLastInodeId());
sourceNamesystem.getSnapshotManager().write(out);
// write compression info and set up compressed stream
out = compression.writeHeaderAndWrapStream(fos);
LOG.info("Saving image file " + newFile +
" using " + compression);
// save the root
saveINode2Image(rootDir, out, false, referenceMap, counter);
// save the rest of the nodes
saveImage(rootDir, out, true, false, counter);
prog.endStep(Phase.SAVING_CHECKPOINT, step);
// Now that the step is finished, set counter equal to total to adjust
// for possible under-counting due to reference inodes.
prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
// save files under construction
// TODO: for HDFS-5428, since we cannot break the compatibility of
// fsimage, we store part of the under-construction files that are only
// in snapshots in this "under-construction-file" section. As a
// temporary solution, we use "/.reserved/.inodes/<inodeid>" as their
// paths, so that when loading fsimage we do not put them into the lease
// map. In the future, we can remove this hack when we can bump the
// layout version.
sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);
context.checkCancelled();
sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
context.checkCancelled();
sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
context.checkCancelled();
out.flush();
context.checkCancelled();
fout.getChannel().force(true);
} finally {
out.close();
}
saved = true;
// set md5 of the saved image
savedDigest = new MD5Hash(digester.digest());
LOG.info("Image file " + newFile + " of size " + newFile.length() +
" bytes saved in " + (now() - startTime)/1000 + " seconds.");
}
/**
* Save children INodes.
* @param children The list of children INodes
* @param out The DataOutputStream to write
* @param inSnapshot Whether the parent directory or its ancestor is in
* the deleted list of some snapshot (caused by rename or
* deletion)
* @param counter Counter to increment for namenode startup progress
* @return Number of children that are directories
*/
private int saveChildren(ReadOnlyList<INode> children,
DataOutputStream out, boolean inSnapshot, Counter counter)
throws IOException {
// Write normal children INode.
out.writeInt(children.size());
int dirNum = 0;
int i = 0;
for(INode child : children) {
// print all children first
// TODO: for HDFS-5428, we cannot change the format/content of fsimage
// here, thus even if the parent directory is in snapshot, we still
// do not handle INodeUC as those stored in deleted list
saveINode2Image(child, out, false, referenceMap, counter);
if (child.isDirectory()) {
dirNum++;
} else if (inSnapshot && child.isFile()
&& child.asFile().isUnderConstruction()) {
this.snapshotUCMap.put(child.getId(), child.asFile());
}
if (i++ % 50 == 0) {
context.checkCancelled();
}
}
return dirNum;
}
/**
* Save file tree image starting from the given root.
* This is a recursive procedure, which first saves all children and
* snapshot diffs of a current directory and then moves inside the
* sub-directories.
*
* @param current The current node
* @param out The DataOutputStream to write the image
* @param toSaveSubtree Whether or not to save the subtree to fsimage. For
* reference node, its subtree may already have been
* saved before.
* @param inSnapshot Whether the current directory is in snapshot
* @param counter Counter to increment for namenode startup progress
*/
private void saveImage(INodeDirectory current, DataOutputStream out,
boolean toSaveSubtree, boolean inSnapshot, Counter counter)
throws IOException {
// write the inode id of the directory
out.writeLong(current.getId());
if (!toSaveSubtree) {
return;
}
final ReadOnlyList<INode> children = current
.getChildrenList(Snapshot.CURRENT_STATE_ID);
int dirNum = 0;
List<INodeDirectory> snapshotDirs = null;
DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature();
if (sf != null) {
snapshotDirs = new ArrayList<INodeDirectory>();
sf.getSnapshotDirectory(snapshotDirs);
dirNum += snapshotDirs.size();
}
// 2. Write INodeDirectorySnapshottable#snapshotsByNames to record all
// Snapshots
if (current instanceof INodeDirectorySnapshottable) {
INodeDirectorySnapshottable snapshottableNode =
(INodeDirectorySnapshottable) current;
SnapshotFSImageFormat.saveSnapshots(snapshottableNode, out);
} else {
out.writeInt(-1); // # of snapshots
}
// 3. Write children INode
dirNum += saveChildren(children, out, inSnapshot, counter);
// 4. Write DirectoryDiff lists, if there is any.
SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap);
// Write sub-tree of sub-directories, including possible snapshots of
// deleted sub-directories
out.writeInt(dirNum); // the number of sub-directories
for(INode child : children) {
if(!child.isDirectory()) {
continue;
}
// make sure we only save the subtree under a reference node once
boolean toSave = child.isReference() ?
referenceMap.toProcessSubtree(child.getId()) : true;
saveImage(child.asDirectory(), out, toSave, inSnapshot, counter);
}
if (snapshotDirs != null) {
for (INodeDirectory subDir : snapshotDirs) {
// make sure we only save the subtree under a reference node once
boolean toSave = subDir.getParentReference() != null ?
referenceMap.toProcessSubtree(subDir.getId()) : true;
saveImage(subDir, out, toSave, true, counter);
}
}
}
/**
* Saves inode and increments progress counter.
*
* @param inode INode to save
* @param out DataOutputStream to receive inode
* @param writeUnderConstruction boolean true if this is under construction
* @param referenceMap ReferenceMap containing reference inodes
* @param counter Counter to increment for namenode startup progress
* @throws IOException thrown if there is an I/O error
*/
private void saveINode2Image(INode inode, DataOutputStream out,
boolean writeUnderConstruction, ReferenceMap referenceMap,
Counter counter) throws IOException {
FSImageSerialization.saveINode2Image(inode, out, writeUnderConstruction,
referenceMap);
// Intentionally do not increment counter for reference inodes, because it
// is too difficult at this point to assess whether or not this is a
// reference that counts toward quota.
if (!(inode instanceof INodeReference)) {
counter.increment();
}
}
}
}

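The removed Saver.save() above streams the image through a DigestOutputStream, so the MD5 checksum is computed as a side effect of writing rather than in a second pass over the file. A minimal standalone sketch of that pattern using only JDK classes; the real code obtains its digester from Hadoop's MD5Hash.getDigester() and also rewraps the stream for compression.

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

class ChecksumWhileWritingSketch {
  static byte[] writeWithMd5(File file, byte[] payload)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest digester = MessageDigest.getInstance("MD5");
    try (FileOutputStream fout = new FileOutputStream(file);
         DigestOutputStream dos = new DigestOutputStream(fout, digester);
         DataOutputStream out = new DataOutputStream(dos)) {
      out.write(payload);            // every byte written also updates the digest
      out.flush();
      fout.getChannel().force(true); // fsync, as the removed Saver did
    }
    return digester.digest();        // MD5 of exactly what hit the stream
  }
}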
FSImageSerialization.java

@@ -17,11 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
@@ -36,21 +31,20 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import com.google.common.base.Preconditions;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
/**
* Static utility functions for serializing various pieces of data in the correct
@@ -88,26 +82,6 @@ public class FSImageSerialization {
final ShortWritable U_SHORT = new ShortWritable();
final IntWritable U_INT = new IntWritable();
final LongWritable U_LONG = new LongWritable();
final FsPermission FILE_PERM = new FsPermission((short) 0);
}
private static void writePermissionStatus(INodeAttributes inode,
DataOutput out) throws IOException {
final FsPermission p = TL_DATA.get().FILE_PERM;
p.fromShort(inode.getFsPermissionShort());
PermissionStatus.write(out, inode.getUserName(), inode.getGroupName(), p);
}
private static void writeBlocks(final Block[] blocks,
final DataOutput out) throws IOException {
if (blocks == null) {
out.writeInt(0);
} else {
out.writeInt(blocks.length);
for (Block blk : blocks) {
blk.write(out);
}
}
}
// Helper function that reads in an INodeUnderConstruction
@@ -153,183 +127,6 @@ public class FSImageSerialization {
return file;
}
// Helper function that writes an INodeUnderConstruction
// into the output stream
//
static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
String path) throws IOException {
writeString(path, out);
out.writeLong(cons.getId());
out.writeShort(cons.getFileReplication());
out.writeLong(cons.getModificationTime());
out.writeLong(cons.getPreferredBlockSize());
writeBlocks(cons.getBlocks(), out);
cons.getPermissionStatus().write(out);
FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
writeString(uc.getClientName(), out);
writeString(uc.getClientMachine(), out);
out.writeInt(0); // do not store locations of last block
}
/**
* Serialize a {@link INodeFile} node
* @param file The node to write
* @param out The {@link DataOutput} where the fields are written
* @param writeUnderConstruction Whether to write under-construction information
*/
public static void writeINodeFile(INodeFile file, DataOutput out,
boolean writeUnderConstruction) throws IOException {
writeLocalName(file, out);
out.writeLong(file.getId());
out.writeShort(file.getFileReplication());
out.writeLong(file.getModificationTime());
out.writeLong(file.getAccessTime());
out.writeLong(file.getPreferredBlockSize());
writeBlocks(file.getBlocks(), out);
SnapshotFSImageFormat.saveFileDiffList(file, out);
if (writeUnderConstruction) {
if (file.isUnderConstruction()) {
out.writeBoolean(true);
final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
writeString(uc.getClientName(), out);
writeString(uc.getClientMachine(), out);
} else {
out.writeBoolean(false);
}
}
writePermissionStatus(file, out);
}
/** Serialize an {@link INodeFileAttributes}. */
public static void writeINodeFileAttributes(INodeFileAttributes file,
DataOutput out) throws IOException {
writeLocalName(file, out);
writePermissionStatus(file, out);
out.writeLong(file.getModificationTime());
out.writeLong(file.getAccessTime());
out.writeShort(file.getFileReplication());
out.writeLong(file.getPreferredBlockSize());
}
private static void writeQuota(Quota.Counts quota, DataOutput out)
throws IOException {
out.writeLong(quota.get(Quota.NAMESPACE));
out.writeLong(quota.get(Quota.DISKSPACE));
}
/**
* Serialize a {@link INodeDirectory}
* @param node The node to write
* @param out The {@link DataOutput} where the fields are written
*/
public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
throws IOException {
writeLocalName(node, out);
out.writeLong(node.getId());
out.writeShort(0); // replication
out.writeLong(node.getModificationTime());
out.writeLong(0); // access time
out.writeLong(0); // preferred block size
out.writeInt(-1); // # of blocks
writeQuota(node.getQuotaCounts(), out);
if (node instanceof INodeDirectorySnapshottable) {
out.writeBoolean(true);
} else {
out.writeBoolean(false);
out.writeBoolean(node.isWithSnapshot());
}
writePermissionStatus(node, out);
}
/**
* Serialize a {@link INodeDirectoryAttributes}
* @param a The attributes to write
* @param out The {@link DataOutput} where the fields are written
*/
public static void writeINodeDirectoryAttributes(
INodeDirectoryAttributes a, DataOutput out) throws IOException {
writeLocalName(a, out);
writePermissionStatus(a, out);
out.writeLong(a.getModificationTime());
writeQuota(a.getQuotaCounts(), out);
}
/**
* Serialize a {@link INodeSymlink} node
* @param node The node to write
* @param out The {@link DataOutput} where the fields are written
*/
private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
throws IOException {
writeLocalName(node, out);
out.writeLong(node.getId());
out.writeShort(0); // replication
out.writeLong(0); // modification time
out.writeLong(0); // access time
out.writeLong(0); // preferred block size
out.writeInt(-2); // # of blocks
Text.writeString(out, node.getSymlinkString());
writePermissionStatus(node, out);
}
/** Serialize a {@link INodeReference} node */
private static void writeINodeReference(INodeReference ref, DataOutput out,
boolean writeUnderConstruction, ReferenceMap referenceMap
) throws IOException {
writeLocalName(ref, out);
out.writeLong(ref.getId());
out.writeShort(0); // replication
out.writeLong(0); // modification time
out.writeLong(0); // access time
out.writeLong(0); // preferred block size
out.writeInt(-3); // # of blocks
final boolean isWithName = ref instanceof INodeReference.WithName;
out.writeBoolean(isWithName);
if (!isWithName) {
Preconditions.checkState(ref instanceof INodeReference.DstReference);
// dst snapshot id
out.writeInt(((INodeReference.DstReference) ref).getDstSnapshotId());
} else {
out.writeInt(((INodeReference.WithName) ref).getLastSnapshotId());
}
final INodeReference.WithCount withCount
= (INodeReference.WithCount)ref.getReferredINode();
referenceMap.writeINodeReferenceWithCount(withCount, out,
writeUnderConstruction);
}
/**
* Save one inode's attributes to the image.
*/
public static void saveINode2Image(INode node, DataOutput out,
boolean writeUnderConstruction, ReferenceMap referenceMap)
throws IOException {
if (node.isReference()) {
writeINodeReference(node.asReference(), out, writeUnderConstruction,
referenceMap);
} else if (node.isDirectory()) {
writeINodeDirectory(node.asDirectory(), out);
} else if (node.isSymlink()) {
writeINodeSymlink(node.asSymlink(), out);
} else if (node.isFile()) {
writeINodeFile(node.asFile(), out, writeUnderConstruction);
}
}
// This should be reverted to package private once the ImageLoader
// code is moved into this package. This method should not be called
// by other code.
@@ -430,12 +227,6 @@ public class FSImageSerialization {
return createdNodeName;
}
private static void writeLocalName(INodeAttributes inode, DataOutput out)
throws IOException {
final byte[] name = inode.getLocalNameBytes();
writeBytes(name, out);
}
public static void writeBytes(byte[] data, DataOutput out)
throws IOException {
out.writeShort(data.length);

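writeBytes() is truncated by the hunk above, but the encoding it implements is the "short + byte[]" convention from the old format description: a short length prefix followed by the raw bytes. A sketch of the matching read side, assuming the writer is out.writeShort(data.length) followed by out.write(data):

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;

class ShortPrefixedBytesSketch {
  // Read one "short + byte[]" record, the inverse of writeBytes().
  static byte[] readBytes(DataInput in) throws IOException {
    int len = in.readShort();   // length prefix
    byte[] data = new byte[len];
    in.readFully(data);         // exactly len payload bytes
    return data;
  }

  public static void main(String[] args) throws IOException {
    byte[] record = {0, 3, 'f', 'o', 'o'}; // length 3, then "foo"
    DataInput in = new DataInputStream(new ByteArrayInputStream(record));
    System.out.println(new String(readBytes(in))); // prints: foo
  }
}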
FSNamesystem.java

@@ -6022,42 +6022,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
leaseManager.changeLease(src, dst);
}
/**
* Serializes leases.
*/
void saveFilesUnderConstruction(DataOutputStream out,
Map<Long, INodeFile> snapshotUCMap) throws IOException {
// This is run by an inferior thread of saveNamespace, which holds a read
// lock on our behalf. If we took the read lock here, we could block
// for fairness if a writer is waiting on the lock.
synchronized (leaseManager) {
Map<String, INodeFile> nodes = leaseManager.getINodesUnderConstruction();
for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
// TODO: for HDFS-5428, because of rename operations, some
// under-construction files that are
// in the current fs directory can also be captured in the
// snapshotUCMap. We should remove them from the snapshotUCMap.
snapshotUCMap.remove(entry.getValue().getId());
}
out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size
for (Map.Entry<String, INodeFile> entry : nodes.entrySet()) {
FSImageSerialization.writeINodeUnderConstruction(
out, entry.getValue(), entry.getKey());
}
for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
// for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
// as their paths
StringBuilder b = new StringBuilder();
b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX)
.append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
.append(Path.SEPARATOR).append(entry.getValue().getId());
FSImageSerialization.writeINodeUnderConstruction(
out, entry.getValue(), b.toString());
}
}
}
/**
* @return all the under-construction files in the lease map
*/
@@ -6334,15 +6298,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logSync();
}
/**
* @param out Output stream for writing the secret manager state
* @param sdPath String storage directory path
*/
void saveSecretManagerStateCompat(DataOutputStream out, String sdPath)
throws IOException {
dtSecretManager.saveSecretManagerStateCompat(out, sdPath);
}
SecretManagerState saveSecretManagerState() {
return dtSecretManager.saveSecretManagerState();
}

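saveFilesUnderConstruction() above keys snapshot-only under-construction files by synthetic "/.reserved/.inodes/<inodeid>" paths so that image loading does not feed them into the lease map. A sketch of that path construction, with the two FSDirectory prefix constants inlined so it is self-contained:

class ReservedInodePathSketch {
  // Values of FSDirectory.DOT_RESERVED_PATH_PREFIX and DOT_INODES_STRING,
  // inlined here for the sketch.
  static final String DOT_RESERVED_PATH_PREFIX = "/.reserved";
  static final String DOT_INODES_STRING = ".inodes";

  static String reservedInodePath(long inodeId) {
    return new StringBuilder()
        .append(DOT_RESERVED_PATH_PREFIX)
        .append('/').append(DOT_INODES_STRING)
        .append('/').append(inodeId)
        .toString(); // e.g. "/.reserved/.inodes/16387"
  }
}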
AbstractINodeDiff.java

@@ -17,17 +17,13 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import com.google.common.base.Preconditions;
import java.util.List;
/**
* The difference of an inode between two snapshots.
@@ -133,11 +129,4 @@ abstract class AbstractINodeDiff<N extends INode,
return getClass().getSimpleName() + ": " + this.getSnapshotId() + " (post="
+ (posteriorDiff == null? null: posteriorDiff.getSnapshotId()) + ")";
}
void writeSnapshot(DataOutput out) throws IOException {
out.writeInt(snapshotId);
}
abstract void write(DataOutput out, ReferenceMap referenceMap
) throws IOException;
}

DirectoryWithSnapshotFeature.java

@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
@@ -34,7 +32,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.server.namenode.Content;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@@ -42,7 +39,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import org.apache.hadoop.hdfs.util.Diff;
import org.apache.hadoop.hdfs.util.Diff.Container;
import org.apache.hadoop.hdfs.util.Diff.ListType;
@@ -124,35 +120,6 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
return counts;
}
/** Serialize {@link #created} */
private void writeCreated(DataOutput out) throws IOException {
final List<INode> created = getList(ListType.CREATED);
out.writeInt(created.size());
for (INode node : created) {
// For INode in created list, we only need to record its local name
byte[] name = node.getLocalNameBytes();
out.writeShort(name.length);
out.write(name);
}
}
/** Serialize {@link #deleted} */
private void writeDeleted(DataOutput out,
ReferenceMap referenceMap) throws IOException {
final List<INode> deleted = getList(ListType.DELETED);
out.writeInt(deleted.size());
for (INode node : deleted) {
FSImageSerialization.saveINode2Image(node, out, true, referenceMap);
}
}
/** Serialize to out */
private void write(DataOutput out, ReferenceMap referenceMap
) throws IOException {
writeCreated(out);
writeDeleted(out, referenceMap);
}
/** Get the list of INodeDirectory contained in the deleted list */
private void getDirsInDeleted(List<INodeDirectory> dirList) {
for (INode node : getList(ListType.DELETED)) {
@@ -347,25 +314,6 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
return childrenSize;
}
@Override
void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
writeSnapshot(out);
out.writeInt(childrenSize);
// Write snapshotINode
out.writeBoolean(isSnapshotRoot);
if (!isSnapshotRoot) {
if (snapshotINode != null) {
out.writeBoolean(true);
FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out);
} else {
out.writeBoolean(false);
}
}
// Write diff. No need to write posteriorDiff, since diffs is a list.
diff.write(out, referenceMap);
}
@Override
Quota.Counts destroyDiffAndCollectBlocks(INodeDirectory currentINode,
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {

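The removed DirectoryDiff.write() above (and FileDiff.write() in the next file) encodes every optional record behind a boolean presence flag: write true plus the payload, or false and nothing, and the reader consults the flag before consuming any payload bytes. A self-contained sketch of that convention:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

class OptionalFieldSketch {
  // Writer: presence flag first, payload only if present.
  static void writeOptionalLong(DataOutput out, Long value) throws IOException {
    out.writeBoolean(value != null);
    if (value != null) {
      out.writeLong(value);
    }
  }

  // Reader: consult the flag before consuming any payload bytes.
  static Long readOptionalLong(DataInput in) throws IOException {
    return in.readBoolean() ? in.readLong() : null;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    writeOptionalLong(out, 42L);
    writeOptionalLong(out, null);
    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(readOptionalLong(in)); // 42
    System.out.println(readOptionalLong(in)); // null
  }
}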
FileDiff.java

@@ -17,17 +17,13 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import java.util.List;
/**
* The difference of an {@link INodeFile} between two snapshots.
@@ -70,20 +66,6 @@ public class FileDiff extends
+ (snapshotINode == null? "?": snapshotINode.getFileReplication());
}
@Override
void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
writeSnapshot(out);
out.writeLong(fileSize);
// write snapshotINode
if (snapshotINode != null) {
out.writeBoolean(true);
FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
} else {
out.writeBoolean(false);
}
}
@Override
Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {

Snapshot.java

@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
@@ -31,7 +30,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -216,11 +214,4 @@ public class Snapshot implements Comparable<byte[]> {
public String toString() {
return getClass().getSimpleName() + "." + root.getLocalName() + "(id=" + id + ")";
}
/** Serialize the fields to out */
void write(DataOutput out) throws IOException {
out.writeInt(id);
// write root
FSImageSerialization.writeINodeDirectory(root, out);
}
}

SnapshotFSImageFormat.java

@@ -29,75 +29,21 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader;
/**
* A helper class defining static methods for reading/writing snapshot related
* information from/to FSImage.
*/
public class SnapshotFSImageFormat {
/**
* Save snapshots and snapshot quota for a snapshottable directory.
* @param current The directory that the snapshots belong to.
* @param out The {@link DataOutput} to write.
* @throws IOException
*/
public static void saveSnapshots(INodeDirectorySnapshottable current,
DataOutput out) throws IOException {
// list of snapshots in snapshotsByNames
ReadOnlyList<Snapshot> snapshots = current.getSnapshotsByNames();
out.writeInt(snapshots.size());
for (Snapshot s : snapshots) {
// write the snapshot id
out.writeInt(s.getId());
}
// snapshot quota
out.writeInt(current.getSnapshotQuota());
}
/**
* Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
* @param diffs The SnapshotDiff list to save.
* @param out The {@link DataOutput} to write.
*/
private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
void saveINodeDiffs(final AbstractINodeDiffList<N, A, D> diffs,
final DataOutput out, ReferenceMap referenceMap) throws IOException {
// Record the diffs in reversed order, so that we can find the correct
// reference for INodes in the created list when loading the FSImage
if (diffs == null) {
out.writeInt(-1); // no diffs
} else {
final List<D> list = diffs.asList();
final int size = list.size();
out.writeInt(size);
for (int i = size - 1; i >= 0; i--) {
list.get(i).write(out, referenceMap);
}
}
}
public static void saveDirectoryDiffList(final INodeDirectory dir,
final DataOutput out, final ReferenceMap referenceMap
) throws IOException {
saveINodeDiffs(dir.getDiffs(), out, referenceMap);
}
public static void saveFileDiffList(final INodeFile file,
final DataOutput out) throws IOException {
saveINodeDiffs(file.getDiffs(), out, null);
}
public static FileDiffList loadFileDiffList(DataInput in,
FSImageFormat.Loader loader) throws IOException {
final int size = in.readInt();
@@ -319,23 +265,6 @@ public class SnapshotFSImageFormat {
*/
private final Map<Long, Long> dirMap = new HashMap<Long, Long>();
public void writeINodeReferenceWithCount(
INodeReference.WithCount withCount, DataOutput out,
boolean writeUnderConstruction) throws IOException {
final INode referred = withCount.getReferredINode();
final long id = withCount.getId();
final boolean firstReferred = !referenceMap.containsKey(id);
out.writeBoolean(firstReferred);
if (firstReferred) {
FSImageSerialization.saveINode2Image(referred, out,
writeUnderConstruction, this);
referenceMap.put(id, withCount);
} else {
out.writeLong(id);
}
}
public boolean toProcessSubtree(long id) {
if (dirMap.containsKey(id)) {
return false;

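writeINodeReferenceWithCount() above serializes a shared inode in full only on its first appearance and thereafter emits just its id, and toProcessSubtree() applies the same first-visit test to whole subtrees. A generic sketch of this write-once-then-reference technique; the record type and layout here are hypothetical:

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class WriteOnceReferenceSketch {
  private final Map<Long, String> written = new HashMap<>();
  private final Map<Long, Boolean> visited = new HashMap<>();

  // First sighting: flag true, then the full record.
  // Later sightings: flag false, then only the id.
  void writeShared(DataOutputStream out, long id, String record) throws IOException {
    boolean firstReferred = !written.containsKey(id);
    out.writeBoolean(firstReferred);
    if (firstReferred) {
      out.writeLong(id);
      out.writeUTF(record);  // full payload, written exactly once
      written.put(id, record);
    } else {
      out.writeLong(id);     // later references carry only the id
    }
  }

  // Counterpart of toProcessSubtree(): visit each subtree at most once.
  boolean toProcess(long id) {
    return visited.putIfAbsent(id, Boolean.TRUE) == null;
  }
}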
SnapshotManager.java

@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
@@ -287,22 +286,6 @@ public class SnapshotManager implements SnapshotStats {
new INodeDirectorySnapshottable[snapshottables.size()]);
}
/**
* Write {@link #snapshotCounter}, {@link #numSnapshots},
* and all snapshots to the DataOutput.
*/
public void write(DataOutput out) throws IOException {
out.writeInt(snapshotCounter);
out.writeInt(numSnapshots.get());
// write all snapshots.
for(INodeDirectorySnapshottable snapshottableDir : snapshottables.values()) {
for(Snapshot s : snapshottableDir.getSnapshotsByNames()) {
s.write(out);
}
}
}
/**
* Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and
* all snapshots from the DataInput