Merge trunk to HDFS-4685.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1551332 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2013-12-16 20:08:58 +00:00
commit 740605cde6
59 changed files with 1674 additions and 1046 deletions

View File

@ -249,9 +249,16 @@ Trunk (Unreleased)
HDFS-5647. Merge INodeDirectory.Feature and INodeFile.Feature. (Haohui Mai
via jing9)
HDFS-5632. Flatten INodeDirectory hierarchy: Replace
INodeDirectoryWithSnapshot with DirectoryWithSnapshotFeature.
(jing9 via szetszwo)
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
HDFS-5665. Remove the unnecessary writeLock while initializing CacheManager
in FsNameSystem Ctor. (Uma Maheswara Rao G via Andrew Wang)
BUG FIXES
HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar
via cmccabe)
@ -447,6 +454,12 @@ Trunk (Unreleased)
HDFS-5626. dfsadmin -report shows incorrect cache values. (cmccabe)
HDFS-5406. Send incremental block reports for all storages in a
single call. (Arpit Agarwal)
HDFS-5454. DataNode UUID should be assigned prior to FsDataset
initialization. (Arpit Agarwal)
BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
HDFS-4985. Add storage type to the protocol and expose it in block report
@ -810,6 +823,9 @@ Release 2.3.0 - UNRELEASED
HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
jing9)
HDFS-5592. statechangeLog of completeFile should be logged only in case of success.
(Vinayakumar via umamahesh)
OPTIMIZATIONS
BUG FIXES
@ -953,6 +969,8 @@ Release 2.3.0 - UNRELEASED
HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES

View File

@ -148,7 +148,7 @@ boolean isAlive() {
return false;
}
String getBlockPoolId() {
synchronized String getBlockPoolId() {
if (bpNSInfo != null) {
return bpNSInfo.getBlockPoolID();
} else {
@ -163,7 +163,7 @@ synchronized NamespaceInfo getNamespaceInfo() {
}
@Override
public String toString() {
public synchronized String toString() {
if (bpNSInfo == null) {
// If we haven't yet connected to our NN, we don't yet know our
// own block pool ID.
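The two hunks above make the accessors synchronized so that threads reading bpNSInfo after the namenode handshake see a consistent value. A minimal sketch of the visibility idiom, with hypothetical names (not the actual BPOfferService fields):

class BlockPoolIdHolder {
  // Written once by the actor thread after registration; read by many others.
  // Synchronized accessors guarantee readers see the written value.
  private String blockPoolId;

  synchronized void set(String id) {
    this.blockPoolId = id;
  }

  synchronized String get() {
    return blockPoolId; // may still be null before the handshake completes
  }
}

A volatile field would give the same visibility guarantee for a single field; synchronized matches the style of the surrounding class, which also guards compound updates.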

View File

@ -22,6 +22,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@ -273,7 +274,8 @@ void reportBadBlocks(ExtendedBlock block,
private void reportReceivedDeletedBlocks() throws IOException {
// Generate a list of the pending reports for each storage under the lock
Map<String, ReceivedDeletedBlockInfo[]> blockArrays = Maps.newHashMap();
ArrayList<StorageReceivedDeletedBlocks> reports =
new ArrayList<StorageReceivedDeletedBlocks>(pendingIncrementalBRperStorage.size());
synchronized (pendingIncrementalBRperStorage) {
for (Map.Entry<String, PerStoragePendingIncrementalBR> entry :
pendingIncrementalBRperStorage.entrySet()) {
@ -286,33 +288,34 @@ private void reportReceivedDeletedBlocks() throws IOException {
pendingReceivedRequests =
(pendingReceivedRequests > rdbi.length ?
(pendingReceivedRequests - rdbi.length) : 0);
blockArrays.put(storageUuid, rdbi);
reports.add(new StorageReceivedDeletedBlocks(storageUuid, rdbi));
}
}
}
// Send incremental block reports to the Namenode outside the lock
for (Map.Entry<String, ReceivedDeletedBlockInfo[]> entry :
blockArrays.entrySet()) {
final String storageUuid = entry.getKey();
final ReceivedDeletedBlockInfo[] rdbi = entry.getValue();
if (reports.size() == 0) {
// Nothing new to report.
return;
}
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
storageUuid, rdbi) };
boolean success = false;
try {
bpNamenode.blockReceivedAndDeleted(bpRegistration,
bpos.getBlockPoolId(), report);
success = true;
} finally {
if (!success) {
synchronized (pendingIncrementalBRperStorage) {
// Send incremental block reports to the Namenode outside the lock
boolean success = false;
try {
bpNamenode.blockReceivedAndDeleted(bpRegistration,
bpos.getBlockPoolId(),
reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]));
success = true;
} finally {
if (!success) {
synchronized (pendingIncrementalBRperStorage) {
for (StorageReceivedDeletedBlocks report : reports) {
// If we didn't succeed in sending the report, put all of the
// blocks back onto our queue, but only in the case where we
// didn't put something newer in the meantime.
PerStoragePendingIncrementalBR perStorageMap =
pendingIncrementalBRperStorage.get(storageUuid);
pendingReceivedRequests += perStorageMap.putMissingBlockInfos(rdbi);
pendingIncrementalBRperStorage.get(report.getStorageID());
pendingReceivedRequests +=
perStorageMap.putMissingBlockInfos(report.getBlocks());
}
}
}
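The rewrite above follows a standard lock-hygiene pattern: drain the pending queue under the lock, issue the slow blockReceivedAndDeleted RPC outside it, and restore the drained items under the lock if the RPC fails. A condensed sketch of that pattern, with hypothetical types (not the actual BPServiceActor code):

import java.util.ArrayList;
import java.util.List;

class BatchedReporter<T> {
  private final List<T> pending = new ArrayList<T>();

  interface Sender<T> {
    void send(List<T> batch) throws Exception;
  }

  void report(Sender<T> sender) throws Exception {
    final List<T> batch = new ArrayList<T>();
    synchronized (pending) {            // 1. drain under the lock
      batch.addAll(pending);
      pending.clear();
    }
    if (batch.isEmpty()) {
      return;                           // nothing new to report
    }
    boolean success = false;
    try {
      sender.send(batch);               // 2. slow RPC outside the lock
      success = true;
    } finally {
      if (!success) {
        synchronized (pending) {        // 3. put the work back on failure,
          pending.addAll(0, batch);     //    ahead of anything queued since
        }
      }
    }
  }
}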

View File

@ -815,8 +815,6 @@ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo)
storageInfo = new StorageInfo(nsInfo);
}
checkDatanodeUuid();
DatanodeID dnId = new DatanodeID(
streamingAddr.getAddress().getHostAddress(), hostName,
storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
@ -965,6 +963,9 @@ private void initStorage(final NamespaceInfo nsInfo) throws IOException {
+ ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
}
// If this is a newly formatted DataNode then assign a new DatanodeUuid.
checkDatanodeUuid();
synchronized(this) {
if (data == null) {
data = factory.newInstance(this, storage, conf);
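Moving checkDatanodeUuid() out of createBPRegistration and ahead of the FsDataset construction enforces an initialization order: the UUID must exist before the dataset that captures it is built (HDFS-5454 in the changelog above). A toy sketch of the constraint, with hypothetical names:

import java.util.UUID;

class StorageInitOrder {
  private String datanodeUuid;
  private Object data; // stands in for the FsDataset instance

  private void checkDatanodeUuid() {
    if (datanodeUuid == null) {
      datanodeUuid = UUID.randomUUID().toString(); // newly formatted node
    }
  }

  synchronized void initStorage() {
    checkDatanodeUuid();        // must run first: the dataset reads the UUID
    if (data == null) {
      data = "dataset for " + datanodeUuid;
    }
  }
}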

View File

@ -153,6 +153,10 @@ void verifyQuota(long nsDelta, long dsDelta) throws QuotaExceededException {
verifyNamespaceQuota(nsDelta);
verifyDiskspaceQuota(dsDelta);
}
boolean isQuotaSet() {
return nsQuota >= 0 || dsQuota >= 0;
}
private String namespaceString() {
return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);

View File

@ -67,7 +67,6 @@
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.util.ByteArray;
@ -622,8 +621,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
// snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree.
if (isSrcInSnapshot) {
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot(),
inodeMap);
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
srcIIP.setLastINode(srcChild);
}
@ -692,11 +690,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
}
// update modification time of dst and the parent of src
final INode srcParent = srcIIP.getINode(-2);
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot(),
inodeMap);
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
dstParent = dstIIP.getINode(-2); // refresh dstParent
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot(),
inodeMap);
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
// update moved leases with new filename
getFSNamesystem().unprotectedChangeLease(src, dst);
@ -734,11 +730,10 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
}
if (isSrcInSnapshot) {
// srcParent must be an INodeDirectoryWithSnapshot instance since
// isSrcInSnapshot is true and src node has been removed from
// srcParent
((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent(
oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot());
// srcParent must have the snapshot feature since isSrcInSnapshot is true
// and the src node has been removed from srcParent
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
srcIIP.getLatestSnapshot());
} else {
// original srcChild is not in latest snapshot, we only need to add
// the srcChild back
@ -879,8 +874,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
// snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree.
if (isSrcInSnapshot) {
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot(),
inodeMap);
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
srcIIP.setLastINode(srcChild);
}
@ -958,11 +952,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
}
final INode srcParent = srcIIP.getINode(-2);
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot(),
inodeMap);
srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
dstParent = dstIIP.getINode(-2);
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot(),
inodeMap);
dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
// update moved lease with new filename
getFSNamesystem().unprotectedChangeLease(src, dst);
@ -1019,9 +1011,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
withCount.getReferredINode().setLocalName(srcChildName);
}
if (srcParent instanceof INodeDirectoryWithSnapshot) {
((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent(
oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot());
if (srcParent.isWithSnapshot()) {
srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
srcIIP.getLatestSnapshot());
} else {
// srcParent is not an INodeDirectoryWithSnapshot, we only need to add
// the srcChild back
@ -1030,9 +1022,9 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
}
if (undoRemoveDst) {
// Rename failed - restore dst
if (dstParent instanceof INodeDirectoryWithSnapshot) {
((INodeDirectoryWithSnapshot) dstParent).undoRename4DstParent(
removedDst, dstIIP.getLatestSnapshot());
if (dstParent.isDirectory() && dstParent.asDirectory().isWithSnapshot()) {
dstParent.asDirectory().undoRename4DstParent(removedDst,
dstIIP.getLatestSnapshot());
} else {
addLastINodeNoQuotaCheck(dstIIP, removedDst);
}
@ -1163,8 +1155,7 @@ void unprotectedSetPermission(String src, FsPermission permissions)
if (inode == null) {
throw new FileNotFoundException("File does not exist: " + src);
}
inode.setPermission(permissions, inodesInPath.getLatestSnapshot(),
inodeMap);
inode.setPermission(permissions, inodesInPath.getLatestSnapshot());
}
void setOwner(String src, String username, String groupname)
@ -1189,11 +1180,10 @@ void unprotectedSetOwner(String src, String username, String groupname)
throw new FileNotFoundException("File does not exist: " + src);
}
if (username != null) {
inode = inode.setUser(username, inodesInPath.getLatestSnapshot(),
inodeMap);
inode = inode.setUser(username, inodesInPath.getLatestSnapshot());
}
if (groupname != null) {
inode.setGroup(groupname, inodesInPath.getLatestSnapshot(), inodeMap);
inode.setGroup(groupname, inodesInPath.getLatestSnapshot());
}
}
@ -1266,7 +1256,7 @@ void unprotectedConcat(String target, String [] srcs, long timestamp)
if(nodeToRemove == null) continue;
nodeToRemove.setBlocks(null);
trgParent.removeChild(nodeToRemove, trgLatestSnapshot, null);
trgParent.removeChild(nodeToRemove, trgLatestSnapshot);
inodeMap.remove(nodeToRemove);
count++;
}
@ -1274,8 +1264,8 @@ void unprotectedConcat(String target, String [] srcs, long timestamp)
// update inodeMap
removeFromInodeMap(Arrays.asList(allSrcInodes));
trgInode.setModificationTime(timestamp, trgLatestSnapshot, inodeMap);
trgParent.updateModificationTime(timestamp, trgLatestSnapshot, inodeMap);
trgInode.setModificationTime(timestamp, trgLatestSnapshot);
trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
// update quota on the parent directory ('count' files removed, 0 space)
unprotectedUpdateCount(trgIIP, trgINodes.length-1, -count, 0);
}
@ -1419,7 +1409,7 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
// record modification
final Snapshot latestSnapshot = iip.getLatestSnapshot();
targetNode = targetNode.recordModification(latestSnapshot, inodeMap);
targetNode = targetNode.recordModification(latestSnapshot);
iip.setLastINode(targetNode);
// Remove the node from the namespace
@ -1430,7 +1420,7 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
// set the parent's modification time
final INodeDirectory parent = targetNode.getParent();
parent.updateModificationTime(mtime, latestSnapshot, inodeMap);
parent.updateModificationTime(mtime, latestSnapshot);
if (removed == 0) {
return 0;
}
@ -2203,8 +2193,7 @@ private boolean addChild(INodesInPath iip, int pos,
final INodeDirectory parent = inodes[pos-1].asDirectory();
boolean added = false;
try {
added = parent.addChild(child, true, iip.getLatestSnapshot(),
inodeMap);
added = parent.addChild(child, true, iip.getLatestSnapshot());
} catch (QuotaExceededException e) {
updateCountNoQuotaCheck(iip, pos,
-counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
@ -2242,7 +2231,7 @@ private long removeLastINode(final INodesInPath iip)
final Snapshot latestSnapshot = iip.getLatestSnapshot();
final INode last = iip.getLastINode();
final INodeDirectory parent = iip.getINode(-2).asDirectory();
if (!parent.removeChild(last, latestSnapshot, inodeMap)) {
if (!parent.removeChild(last, latestSnapshot)) {
return -1;
}
INodeDirectory newParent = last.getParent();
@ -2394,7 +2383,7 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
}
final Snapshot latest = iip.getLatestSnapshot();
dirNode = dirNode.recordModification(latest, inodeMap);
dirNode = dirNode.recordModification(latest);
dirNode.setQuota(nsQuota, dsQuota);
return dirNode;
}
@ -2462,7 +2451,7 @@ private boolean unprotectedSetTimes(INode inode, long mtime,
assert hasWriteLock();
boolean status = false;
if (mtime != -1) {
inode = inode.setModificationTime(mtime, latest, inodeMap);
inode = inode.setModificationTime(mtime, latest);
status = true;
}
if (atime != -1) {
@ -2473,7 +2462,7 @@ private boolean unprotectedSetTimes(INode inode, long mtime,
if (atime <= inodeTime + getFSNamesystem().getAccessTimePrecision() && !force) {
status = false;
} else {
inode.setAccessTime(atime, latest, inodeMap);
inode.setAccessTime(atime, latest);
status = true;
}
}
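Every call site changed in this file follows the same copy-on-write discipline: recordModification(latest) first saves the inode's pre-change state into the latest snapshot, then the caller mutates whatever inode comes back. Since the feature-based design no longer replaces the inode object, the INodeMap parameter that kept the map in sync is gone. A distilled sketch of the idiom, with hypothetical types:

abstract class NodeSketch {
  interface Snapshot {}

  // Save the pre-change state into the latest snapshot if needed; with the
  // feature-based design this normally returns the same object.
  abstract NodeSketch recordModification(Snapshot latest);

  long modificationTime;

  final NodeSketch setModificationTime(long mtime, Snapshot latest) {
    final NodeSketch nodeToUpdate = recordModification(latest); // record first
    nodeToUpdate.modificationTime = mtime;                      // then mutate
    return nodeToUpdate;
  }
}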

View File

@ -31,18 +31,18 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
@ -55,11 +55,11 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@ -354,8 +354,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// update the block list.
// Update the salient file attributes.
newFile.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
newFile.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
newFile.setAccessTime(addCloseOp.atime, null);
newFile.setModificationTime(addCloseOp.mtime, null);
updateBlocks(fsDir, addCloseOp, newFile);
break;
}
@ -373,8 +373,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
// Update the salient file attributes.
file.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
file.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
file.setAccessTime(addCloseOp.atime, null);
file.setModificationTime(addCloseOp.mtime, null);
updateBlocks(fsDir, addCloseOp, file);
// Now close the file

View File

@ -52,9 +52,9 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@ -731,9 +731,10 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
if (nsQuota >= 0 || dsQuota >= 0) {
dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
}
return snapshottable ? new INodeDirectorySnapshottable(dir)
: withSnapshot ? new INodeDirectoryWithSnapshot(dir)
: dir;
if (withSnapshot) {
dir.addSnapshotFeature(null);
}
return snapshottable ? new INodeDirectorySnapshottable(dir) : dir;
} else if (numBlocks == -2) {
//symlink
@ -1113,10 +1114,10 @@ private void saveImage(INodeDirectory current, DataOutputStream out,
final ReadOnlyList<INode> children = current.getChildrenList(null);
int dirNum = 0;
List<INodeDirectory> snapshotDirs = null;
if (current instanceof INodeDirectoryWithSnapshot) {
DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature();
if (sf != null) {
snapshotDirs = new ArrayList<INodeDirectory>();
((INodeDirectoryWithSnapshot) current).getSnapshotDirectory(
snapshotDirs);
sf.getSnapshotDirectory(snapshotDirs);
dirNum += snapshotDirs.size();
}
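In the loader above, a directory's snapshot capability is no longer expressed by constructing an INodeDirectoryWithSnapshot subclass; the saved boolean simply toggles a feature on a plain directory. A minimal sketch of that load path, with hypothetical types (the real image format carries many more fields):

import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class DirectoryLoadSketch {
  static class Dir {
    final List<Object> features = new ArrayList<Object>();
    void addSnapshotFeature() {
      features.add("snapshot-feature"); // placeholder for the real feature
    }
  }

  static Dir loadDirectory(DataInput in) throws IOException {
    Dir dir = new Dir();
    if (in.readBoolean()) {  // the withSnapshot flag written by the saver
      dir.addSnapshotFeature();
    }
    return dir;
  }
}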

View File

@ -36,7 +36,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import org.apache.hadoop.io.IntWritable;
@ -239,7 +238,7 @@ public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
out.writeBoolean(true);
} else {
out.writeBoolean(false);
out.writeBoolean(node instanceof INodeDirectoryWithSnapshot);
out.writeBoolean(node.isWithSnapshot());
}
writePermissionStatus(node, out);

View File

@ -737,12 +737,7 @@ public static FSNamesystem loadFromDisk(Configuration conf)
this.dtSecretManager = createDelegationTokenSecretManager(conf);
this.dir = new FSDirectory(fsImage, this, conf);
this.snapshotManager = new SnapshotManager(dir);
writeLock();
try {
this.cacheManager = new CacheManager(this, conf, blockManager);
} finally {
writeUnlock();
}
this.cacheManager = new CacheManager(this, conf, blockManager);
this.safeMode = new SafeModeInfo(conf);
this.auditLoggers = initAuditLoggers(conf);
this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
@ -2297,7 +2292,7 @@ LocatedBlock prepareFileForWrite(String src, INodeFile file,
String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache)
throws IOException {
file = file.recordModification(latestSnapshot, dir.getINodeMap());
file = file.recordModification(latestSnapshot);
final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
clientNode);
@ -2879,8 +2874,10 @@ boolean completeFile(String src, String holder,
writeUnlock();
}
getEditLog().logSync();
NameNode.stateChangeLog.info("DIR* completeFile: " + src + " is closed by "
+ holder);
if (success) {
NameNode.stateChangeLog.info("DIR* completeFile: " + src
+ " is closed by " + holder);
}
return success;
}
@ -3785,8 +3782,7 @@ private void finalizeINodeFileUnderConstruction(String src,
Preconditions.checkArgument(uc != null);
leaseManager.removeLease(uc.getClientName(), src);
pendingFile = pendingFile.recordModification(latestSnapshot,
dir.getINodeMap());
pendingFile = pendingFile.recordModification(latestSnapshot);
// The file is no longer pending.
// Create permanent INode, update blocks. No need to replace the inode here
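The removed writeLock()/writeUnlock() pair around the CacheManager construction was unnecessary because the FSNamesystem constructor runs before the instance is published to any other thread (HDFS-5665 in the changelog above). A sketch of why construction needs no lock, assuming 'this' does not escape before the constructor returns:

class SafeConstructionSketch {
  private final Object cacheManager;

  SafeConstructionSketch() {
    // No lock needed: no other thread can hold a reference to 'this' yet,
    // so there is nothing for a lock to exclude.
    this.cacheManager = new Object();
  }
}

The guarantee holds only as long as the constructor does not leak this, for example by registering a callback, before it finishes.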

View File

@ -35,7 +35,6 @@
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;
import org.apache.hadoop.hdfs.util.Diff;
@ -96,9 +95,9 @@ public final String getUserName() {
abstract void setUser(String user);
/** Set user */
final INode setUser(String user, Snapshot latest, INodeMap inodeMap)
final INode setUser(String user, Snapshot latest)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest, inodeMap);
final INode nodeToUpdate = recordModification(latest);
nodeToUpdate.setUser(user);
return nodeToUpdate;
}
@ -120,9 +119,9 @@ public final String getGroupName() {
abstract void setGroup(String group);
/** Set group */
final INode setGroup(String group, Snapshot latest, INodeMap inodeMap)
final INode setGroup(String group, Snapshot latest)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest, inodeMap);
final INode nodeToUpdate = recordModification(latest);
nodeToUpdate.setGroup(group);
return nodeToUpdate;
}
@ -145,9 +144,9 @@ public final FsPermission getFsPermission() {
abstract void setPermission(FsPermission permission);
/** Set the {@link FsPermission} of this {@link INode} */
INode setPermission(FsPermission permission, Snapshot latest,
INodeMap inodeMap) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest, inodeMap);
INode setPermission(FsPermission permission, Snapshot latest)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
nodeToUpdate.setPermission(permission);
return nodeToUpdate;
}
@ -231,14 +230,12 @@ public final boolean shouldRecordInSrcSnapshot(final Snapshot latestInDst) {
*
* @param latest the latest snapshot that has been taken.
* Note that it is null if no snapshots have been taken.
* @param inodeMap while recording modification, the inode or its parent may
* get replaced, and the inodeMap needs to be updated.
* @return The current inode, which usually is the same object of this inode.
* However, in some cases, this inode may be replaced with a new inode
* for maintaining snapshots. The current inode is then the new inode.
*/
abstract INode recordModification(final Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException;
abstract INode recordModification(final Snapshot latest)
throws QuotaExceededException;
/** Check whether it's a reference. */
public boolean isReference() {
@ -318,7 +315,7 @@ public INodeSymlink asSymlink() {
* Call recordModification(..) to capture the current states.
* Mark the INode as deleted.
*
* 1.4 The current inode is a {@link INodeDirectoryWithSnapshot}.
* 1.4 The current inode is an {@link INodeDirectory} with snapshot feature.
* Call recordModification(..) to capture the current states.
* Destroy files/directories created after the latest snapshot
* (i.e., the inodes stored in the created list of the latest snapshot).
@ -329,7 +326,7 @@ public INodeSymlink asSymlink() {
* 2.2 To clean {@link INodeDirectory}: recursively clean its children.
* 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in
* its diff list.
* 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding
* 2.4 To clean {@link INodeDirectory} with snapshot: delete the corresponding
* snapshot in its diff list. Recursively clean its children.
* </pre>
*
@ -575,16 +572,16 @@ public final long getModificationTime() {
}
/** Update modification time if it is larger than the current value. */
public abstract INode updateModificationTime(long mtime, Snapshot latest,
INodeMap inodeMap) throws QuotaExceededException;
public abstract INode updateModificationTime(long mtime, Snapshot latest)
throws QuotaExceededException;
/** Set the last modification time of inode. */
public abstract void setModificationTime(long modificationTime);
/** Set the last modification time of inode. */
public final INode setModificationTime(long modificationTime,
Snapshot latest, INodeMap inodeMap) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest, inodeMap);
Snapshot latest) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
nodeToUpdate.setModificationTime(modificationTime);
return nodeToUpdate;
}
@ -611,9 +608,9 @@ public final long getAccessTime() {
/**
* Set last access time of inode.
*/
public final INode setAccessTime(long accessTime, Snapshot latest,
INodeMap inodeMap) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest, inodeMap);
public final INode setAccessTime(long accessTime, Snapshot latest)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latest);
nodeToUpdate.setAccessTime(accessTime);
return nodeToUpdate;
}
@ -753,8 +750,9 @@ public void clear() {
}
}
/** INode feature such as {@link FileUnderConstructionFeature}
* and {@link DirectoryWithQuotaFeature}.
/**
* INode feature such as {@link FileUnderConstructionFeature}
* and {@link DirectoryWithQuotaFeature}.
*/
public interface Feature {
}
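The reformatted javadoc above documents the Feature marker interface this whole change builds on: capabilities hang off the inode as feature objects, and callers discover one by scanning the list instead of testing subclasses with instanceof. A minimal sketch of the lookup idiom, with hypothetical feature types:

class FeatureListSketch {
  interface Feature {}
  static class QuotaFeature implements Feature {}

  private Feature[] features = new Feature[0];

  void addFeature(Feature f) {
    Feature[] next = java.util.Arrays.copyOf(features, features.length + 1);
    next[features.length] = f;
    features = next;
  }

  QuotaFeature getQuotaFeature() {
    for (Feature f : features) {        // linear scan; the list stays tiny
      if (f instanceof QuotaFeature) {
        return (QuotaFeature) f;
      }
    }
    return null;                        // capability not present
  }
}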

View File

@ -32,9 +32,11 @@
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
@ -168,40 +170,49 @@ DirectoryWithQuotaFeature addDirectoryWithQuotaFeature(
private int searchChildren(byte[] name) {
return children == null? -1: Collections.binarySearch(children, name);
}
protected DirectoryWithSnapshotFeature addSnapshotFeature(
DirectoryDiffList diffs) {
Preconditions.checkState(!isWithSnapshot(),
"Directory is already with snapshot");
DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(diffs);
addFeature(sf);
return sf;
}
/**
* Remove the specified child from this directory.
*
* @param child the child inode to be removed
* @param latest See {@link INode#recordModification(Snapshot, INodeMap)}.
* If feature list contains a {@link DirectoryWithSnapshotFeature}, return it;
* otherwise, return null.
*/
public boolean removeChild(INode child, Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
.removeChild(child, latest, inodeMap);
public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() {
for (Feature f : features) {
if (f instanceof DirectoryWithSnapshotFeature) {
return (DirectoryWithSnapshotFeature) f;
}
}
return removeChild(child);
return null;
}
/**
* Remove the specified child from this directory.
* The basic remove method which actually calls children.remove(..).
*
* @param child the child inode to be removed
*
* @return true if the child is removed; false if the child is not found.
*/
protected final boolean removeChild(final INode child) {
final int i = searchChildren(child.getLocalNameBytes());
if (i < 0) {
return false;
}
final INode removed = children.remove(i);
Preconditions.checkState(removed == child);
return true;
/** Does this directory have the snapshot feature? */
public final boolean isWithSnapshot() {
return getDirectoryWithSnapshotFeature() != null;
}
public DirectoryDiffList getDiffs() {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf != null ? sf.getDiffs() : null;
}
@Override
public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshot, this);
}
@Override
public String toDetailString() {
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
return super.toDetailString() + (sf == null ? "" : ", " + sf.getDiffs());
}
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
@ -210,16 +221,11 @@ public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
"this is already an INodeDirectorySnapshottable, this=%s", this);
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
.saveSelf2Snapshot(latest, s, this);
return s;
}
/** Replace itself with an {@link INodeDirectoryWithSnapshot}. */
public INodeDirectoryWithSnapshot replaceSelf4INodeDirectoryWithSnapshot(
final INodeMap inodeMap) {
return replaceSelf(new INodeDirectoryWithSnapshot(this), inodeMap);
}
/** Replace itself with {@link INodeDirectory}. */
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
Preconditions.checkState(getClass() != INodeDirectory.class,
@ -245,7 +251,13 @@ private final <N extends INodeDirectory> N replaceSelf(final N newDir,
return newDir;
}
/** Replace the given child with a new child. */
/**
* Replace the given child with a new child. Note that we no longer need to
* replace a normal INodeDirectory or INodeFile with an
* INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
* for child replacement are {@link INodeDirectorySnapshottable} and
* reference nodes.
*/
public void replaceChild(INode oldChild, final INode newChild,
final INodeMap inodeMap) {
Preconditions.checkNotNull(children);
@ -256,24 +268,24 @@ public void replaceChild(INode oldChild, final INode newChild,
.asReference().getReferredINode());
oldChild = children.get(i);
if (oldChild.isReference() && !newChild.isReference()) {
// replace the referred inode, e.g.,
// INodeFileWithSnapshot -> INodeFileUnderConstructionWithSnapshot
final INode withCount = oldChild.asReference().getReferredINode();
withCount.asReference().setReferredINode(newChild);
} else {
if (oldChild.isReference()) {
// both are reference nodes, e.g., DstReference -> WithName
final INodeReference.WithCount withCount =
(WithCount) oldChild.asReference().getReferredINode();
withCount.removeReference(oldChild.asReference());
}
children.set(i, newChild);
if (oldChild.isReference() && newChild.isReference()) {
// both are reference nodes, e.g., DstReference -> WithName
final INodeReference.WithCount withCount =
(WithCount) oldChild.asReference().getReferredINode();
withCount.removeReference(oldChild.asReference());
}
children.set(i, newChild);
// replace the instance in the created list of the diff list
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
}
// update the inodeMap
if (inodeMap != null) {
inodeMap.put(newChild);
}
}
}
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
@ -298,14 +310,18 @@ INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
}
@Override
public INodeDirectory recordModification(Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
.recordModification(latest, inodeMap);
} else {
return this;
public INodeDirectory recordModification(Snapshot latest)
throws QuotaExceededException {
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
// add snapshot feature if necessary
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = addSnapshotFeature(null);
}
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latest, this, null);
}
return this;
}
/**
@ -314,13 +330,17 @@ public INodeDirectory recordModification(Snapshot latest,
* @return the child inode, which may be replaced.
*/
public INode saveChild2Snapshot(final INode child, final Snapshot latest,
final INode snapshotCopy, final INodeMap inodeMap)
throws QuotaExceededException {
final INode snapshotCopy) throws QuotaExceededException {
if (latest == null) {
return child;
}
return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
.saveChild2Snapshot(child, latest, snapshotCopy, inodeMap);
// add snapshot feature if necessary
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.saveChild2Snapshot(this, child, latest, snapshotCopy);
}
/**
@ -331,9 +351,36 @@ public INode saveChild2Snapshot(final INode child, final Snapshot latest,
* @return the child inode.
*/
public INode getChild(byte[] name, Snapshot snapshot) {
final ReadOnlyList<INode> c = getChildrenList(snapshot);
final int i = ReadOnlyList.Util.binarySearch(c, name);
return i < 0? null: c.get(i);
DirectoryWithSnapshotFeature sf;
if (snapshot == null || (sf = getDirectoryWithSnapshotFeature()) == null) {
ReadOnlyList<INode> c = getCurrentChildrenList();
final int i = ReadOnlyList.Util.binarySearch(c, name);
return i < 0 ? null : c.get(i);
}
return sf.getChild(this, name, snapshot);
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current directory.
* @return the current children list if the specified snapshot is null;
* otherwise, return the children list corresponding to the snapshot.
* Note that the returned list is never null.
*/
public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
DirectoryWithSnapshotFeature sf;
if (snapshot == null
|| (sf = this.getDirectoryWithSnapshotFeature()) == null) {
return getCurrentChildrenList();
}
return sf.getChildrenList(this, snapshot);
}
private ReadOnlyList<INode> getCurrentChildrenList() {
return children == null ? ReadOnlyList.Util.<INode> emptyList()
: ReadOnlyList.Util.asReadOnlyList(children);
}
/** @return the {@link INodesInPath} containing only the last inode. */
@ -399,6 +446,41 @@ static int nextChild(ReadOnlyList<INode> children, byte[] name) {
}
return -nextPos;
}
/**
* Remove the specified child from this directory.
*/
public boolean removeChild(INode child, Snapshot latest)
throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
// create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.removeChild(this, child, latest);
}
return removeChild(child);
}
/**
* Remove the specified child from this directory.
* The basic remove method which actually calls children.remove(..).
*
* @param child the child inode to be removed
*
* @return true if the child is removed; false if the child is not found.
*/
public boolean removeChild(final INode child) {
final int i = searchChildren(child.getLocalNameBytes());
if (i < 0) {
return false;
}
final INode removed = children.remove(i);
Preconditions.checkState(removed == child);
return true;
}
/**
* Add a child inode to the directory.
@ -407,34 +489,32 @@ static int nextChild(ReadOnlyList<INode> children, byte[] name) {
* @param setModTime set modification time for the parent node
* not needed when replaying the addition and
* the parent already has the proper mod time
* @param inodeMap update the inodeMap if the directory node gets replaced
* @return false if the child with this name already exists;
* otherwise, return true;
*/
public boolean addChild(INode node, final boolean setModTime,
final Snapshot latest, final INodeMap inodeMap)
throws QuotaExceededException {
final Snapshot latest) throws QuotaExceededException {
final int low = searchChildren(node.getLocalNameBytes());
if (low >= 0) {
return false;
}
if (isInLatestSnapshot(latest)) {
INodeDirectoryWithSnapshot sdir =
replaceSelf4INodeDirectoryWithSnapshot(inodeMap);
boolean added = sdir.addChild(node, setModTime, latest, inodeMap);
return added;
// create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.addChild(this, node, setModTime, latest);
}
addChild(node, low);
if (setModTime) {
// update modification time of the parent directory
updateModificationTime(node.getModificationTime(), latest, inodeMap);
updateModificationTime(node.getModificationTime(), latest);
}
return true;
}
/** The same as addChild(node, false, null). */
public boolean addChild(INode node) {
final int low = searchChildren(node.getLocalNameBytes());
if (low >= 0) {
@ -463,21 +543,34 @@ private void addChild(final INode node, final int insertionPoint) {
@Override
public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
int lastSnapshotId) {
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (q != null) {
if (useCache && isQuotaSet()) {
q.addNamespaceDiskspace(counts);
} else {
computeDirectoryQuotaUsage(counts, false, lastSnapshotId);
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// we are computing the quota usage for a specific snapshot here, i.e., the
// computation only includes files/directories that exist at the time of the
// given snapshot
if (sf != null && lastSnapshotId != Snapshot.INVALID_ID
&& !(useCache && isQuotaSet())) {
Snapshot lastSnapshot = sf.getDiffs().getSnapshotById(lastSnapshotId);
ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
for (INode child : childrenList) {
child.computeQuotaUsage(counts, useCache, lastSnapshotId);
}
counts.add(Quota.NAMESPACE, 1);
return counts;
}
// compute the quota usage in the scope of the current directory tree
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
return q.addNamespaceDiskspace(counts);
} else {
useCache = q != null && !q.isQuotaSet() ? false : useCache;
return computeDirectoryQuotaUsage(counts, useCache, lastSnapshotId);
}
}
Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache,
int lastSnapshotId) {
private Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts,
boolean useCache, int lastSnapshotId) {
if (children != null) {
for (INode child : children) {
child.computeQuotaUsage(counts, useCache, lastSnapshotId);
@ -489,12 +582,21 @@ Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache,
/** Add quota usage for this inode excluding children. */
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
counts.add(Quota.NAMESPACE, 1);
// include the diff list
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.computeQuotaUsage4CurrentDirectory(counts);
}
return counts;
}
@Override
public ContentSummaryComputationContext computeContentSummary(
ContentSummaryComputationContext summary) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.computeContentSummary4Snapshot(summary.getCounts());
}
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (q != null) {
return q.computeContentSummary(this, summary);
@ -521,13 +623,11 @@ ContentSummaryComputationContext computeDirectoryContentSummary(
if (lastYieldCount == summary.getYieldCount()) {
continue;
}
// The locks were released and reacquired. Check parent first.
if (getParent() == null) {
// Stop further counting and return whatever we have so far.
break;
}
// Obtain the children list again since it may have been modified.
childrenList = getChildrenList(null);
// Reposition in case the children list is changed. Decrement by 1
@ -537,24 +637,77 @@ ContentSummaryComputationContext computeDirectoryContentSummary(
// Increment the directory count for this directory.
summary.getCounts().add(Content.DIRECTORY, 1);
// Relinquish and reacquire locks if necessary.
summary.yield();
return summary;
}
/**
* @param snapshot
* if it is not null, get the result from the given snapshot;
* otherwise, get the result from the current directory.
* @return the current children list if the specified snapshot is null;
* otherwise, return the children list corresponding to the snapshot.
* Note that the returned list is never null.
* This method is usually called by the undo section of rename.
*
* Before calling this function, in the rename operation we replace the
* original src node with a reference node (a WithName instance) in both the
* children list and the created list, then delete the reference node from
* the children list and add it to the corresponding deleted list.
*
* To undo the above operations, we have the following steps in particular:
*
* <pre>
* 1) remove the WithName node from the deleted list (if it exists)
* 2) replace the WithName node in the created list with srcChild
* 3) add srcChild back as a child of srcParent. Note that since we already
* added the node to the created list of a snapshot diff in step 2, we do not
* need to add srcChild to the created list of the latest snapshot.
* </pre>
*
* We do not need to update quota usage because the old child was already in
* the deleted list.
*
* @param oldChild
* The reference node to be removed/replaced
* @param newChild
* The node to be added back
* @param latestSnapshot
* The latest snapshot. Note this may not be the last snapshot in the
* diff list, since the src tree of the current rename operation
* may be the dst tree of a previous rename.
* @throws QuotaExceededException should not throw this exception
*/
public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
return children == null ? ReadOnlyList.Util.<INode>emptyList()
: ReadOnlyList.Util.asReadOnlyList(children);
public void undoRename4ScrParent(final INodeReference oldChild,
final INode newChild, Snapshot latestSnapshot)
throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
Preconditions.checkState(sf != null,
"Directory does not have snapshot feature");
sf.getDiffs().removeChild(ListType.DELETED, oldChild);
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
addChild(newChild, true, null);
}
/**
* Undo the rename operation for the dst tree, i.e., if the rename operation
* (with OVERWRITE option) removes a file/dir from the dst tree, add it back
* and delete possible record in the deleted list.
*/
public void undoRename4DstParent(final INode deletedChild,
Snapshot latestSnapshot) throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
Preconditions.checkState(sf != null,
"Directory does not have snapshot feature");
boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
deletedChild);
// pass null for inodeMap since the parent node will not get replaced when
// undoing rename
final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
: latestSnapshot);
// update quota usage if the child is added successfully and the old child
// has not been stored in the deleted list before
if (added && !removeDeletedChild) {
final Quota.Counts counts = deletedChild.computeQuotaUsage();
addSpaceConsumed(counts.get(Quota.NAMESPACE),
counts.get(Quota.DISKSPACE), false);
}
}
/** Set the children list to null. */
@ -578,7 +731,7 @@ public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
// the diff list, the snapshot to be deleted has been combined or renamed
// to its latest previous snapshot. (besides, we also need to consider nodes
// created after prior but before snapshot. this will be done in
// INodeDirectoryWithSnapshot#cleanSubtree)
// DirectoryWithSnapshotFeature)
Snapshot s = snapshot != null && prior != null ? prior : snapshot;
for (INode child : getChildrenList(s)) {
if (snapshot != null && excludedNodes != null
@ -596,6 +749,10 @@ public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
@Override
public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.clear(this, collectedBlocks, removedINodes);
}
for (INode child : getChildrenList(null)) {
child.destroyAndCollectBlocks(collectedBlocks, removedINodes);
}
@ -608,6 +765,13 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// there is snapshot data
if (sf != null) {
return sf.cleanDirectory(this, snapshot, prior, collectedBlocks,
removedINodes, countDiffChange);
}
// there is no snapshot data
if (prior == null && snapshot == null) {
// destroy the whole subtree and collect blocks that should be deleted
Quota.Counts counts = Quota.Counts.newInstance();
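recordModification, saveChild2Snapshot, removeChild, addChild and cleanSubtree above all share one shape: if the directory is caught in the latest snapshot and has no DirectoryWithSnapshotFeature yet, attach one lazily, then delegate the mutation to the feature so the change lands in its diff list. A condensed sketch of that lazy-attach-and-delegate shape, with hypothetical types:

class SnapshotDirSketch {
  interface SnapshotFeature {
    boolean removeChild(Object child); // records the change in a diff list
  }

  private SnapshotFeature sf; // null until the first snapshotted mutation

  boolean removeChild(Object child, boolean inLatestSnapshot) {
    if (inLatestSnapshot) {
      if (sf == null) {
        sf = newSnapshotFeature();   // attached on demand, not by subclassing
      }
      return sf.removeChild(child);
    }
    return plainRemove(child);       // no snapshot bookkeeping needed
  }

  private SnapshotFeature newSnapshotFeature() {
    return new SnapshotFeature() {
      public boolean removeChild(Object child) {
        // a real feature would first copy state into the diff list here
        return plainRemove(child);
      }
    };
  }

  private boolean plainRemove(Object child) {
    return true; // stands in for children.remove(..)
  }
}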

View File

@ -27,7 +27,11 @@
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@ -246,6 +250,8 @@ boolean removeLastBlock(Block oldblock) {
/* Start of Snapshot Feature */
private FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
Preconditions.checkState(!isWithSnapshot(),
"File is already with snapshot");
FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
this.addFeature(sf);
return sf;
@ -279,25 +285,23 @@ public String toDetailString() {
public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
return sf.getSnapshotINode(this, snapshot);
return sf.getDiffs().getSnapshotINode(snapshot, this);
} else {
return this;
}
}
@Override
public INodeFile recordModification(final Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
public INodeFile recordModification(final Snapshot latest)
throws QuotaExceededException {
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
// the file is in a snapshot: create a snapshot feature if it does not
// already have one
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf == null) {
sf = addSnapshotFeature(null);
}
// record self in the diff list if necessary
if (!shouldRecordInSrcSnapshot(latest)) {
sf.getDiffs().saveSelf2Snapshot(latest, this, null);
}
sf.getDiffs().saveSelf2Snapshot(latest, this, null);
}
return this;
}
@ -349,7 +353,7 @@ public final void setFileReplication(short replication) {
/** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication, Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
final INodeFile nodeToUpdate = recordModification(latest, inodeMap);
final INodeFile nodeToUpdate = recordModification(latest);
nodeToUpdate.setFileReplication(replication);
return nodeToUpdate;
}

View File

@ -89,8 +89,7 @@ public INode get(long id) {
"", "", new FsPermission((short) 0)), 0, 0) {
@Override
INode recordModification(Snapshot latest, INodeMap inodeMap)
throws QuotaExceededException {
INode recordModification(Snapshot latest) throws QuotaExceededException {
return null;
}

View File

@ -26,7 +26,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
@ -103,9 +103,12 @@ static Snapshot getPriorSnapshot(INodeReference ref) {
INode referred = wc.getReferredINode();
if (referred.isFile() && referred.asFile().isWithSnapshot()) {
return referred.asFile().getDiffs().getPrior(wn.lastSnapshotId);
} else if (referred instanceof INodeDirectoryWithSnapshot) {
return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
wn.lastSnapshotId);
} else if (referred.isDirectory()) {
DirectoryWithSnapshotFeature sf = referred.asDirectory()
.getDirectoryWithSnapshotFeature();
if (sf != null) {
return sf.getDiffs().getPrior(wn.lastSnapshotId);
}
}
}
return null;
@ -231,9 +234,9 @@ public final long getModificationTime(Snapshot snapshot) {
}
@Override
public final INode updateModificationTime(long mtime, Snapshot latest,
INodeMap inodeMap) throws QuotaExceededException {
return referred.updateModificationTime(mtime, latest, inodeMap);
public final INode updateModificationTime(long mtime, Snapshot latest)
throws QuotaExceededException {
return referred.updateModificationTime(mtime, latest);
}
@Override
@ -252,9 +255,9 @@ public final void setAccessTime(long accessTime) {
}
@Override
final INode recordModification(Snapshot latest, final INodeMap inodeMap)
final INode recordModification(Snapshot latest)
throws QuotaExceededException {
referred.recordModification(latest, inodeMap);
referred.recordModification(latest);
// reference is never replaced
return this;
}
@ -547,9 +550,12 @@ private Snapshot getSelfSnapshot() {
Snapshot snapshot = null;
if (referred.isFile() && referred.asFile().isWithSnapshot()) {
snapshot = referred.asFile().getDiffs().getPrior(lastSnapshotId);
} else if (referred instanceof INodeDirectoryWithSnapshot) {
snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
lastSnapshotId);
} else if (referred.isDirectory()) {
DirectoryWithSnapshotFeature sf = referred.asDirectory()
.getDirectoryWithSnapshotFeature();
if (sf != null) {
snapshot = sf.getDiffs().getPrior(lastSnapshotId);
}
}
return snapshot;
}
@ -634,10 +640,11 @@ public void destroyAndCollectBlocks(
Snapshot snapshot = getSelfSnapshot(prior);
INode referred = getReferredINode().asReference().getReferredINode();
if (referred.isFile() && referred.asFile().isWithSnapshot()) {
// if referred is a file, it must be a file with Snapshot since we did
if (referred.isFile()) {
// if referred is a file, it must be a file with snapshot since we did
// recordModification before the rename
INodeFile file = referred.asFile();
Preconditions.checkState(file.isWithSnapshot());
// make sure we mark the file as deleted
file.getFileWithSnapshotFeature().deleteCurrentFile();
try {
@ -649,14 +656,14 @@ public void destroyAndCollectBlocks(
} catch (QuotaExceededException e) {
LOG.error("should not exceed quota while snapshot deletion", e);
}
} else if (referred instanceof INodeDirectoryWithSnapshot) {
} else if (referred.isDirectory()) {
// similarly, if referred is a directory, it must be an
// INodeDirectoryWithSnapshot
INodeDirectoryWithSnapshot sdir =
(INodeDirectoryWithSnapshot) referred;
// INodeDirectory with snapshot
INodeDirectory dir = referred.asDirectory();
Preconditions.checkState(dir.isWithSnapshot());
try {
INodeDirectoryWithSnapshot.destroyDstSubtree(sdir, snapshot, prior,
collectedBlocks, removedINodes);
DirectoryWithSnapshotFeature.destroyDstSubtree(dir, snapshot,
prior, collectedBlocks, removedINodes);
} catch (QuotaExceededException e) {
LOG.error("should not exceed quota while snapshot deletion", e);
}
@ -670,9 +677,12 @@ private Snapshot getSelfSnapshot(final Snapshot prior) {
Snapshot lastSnapshot = null;
if (referred.isFile() && referred.asFile().isWithSnapshot()) {
lastSnapshot = referred.asFile().getDiffs().getLastSnapshot();
} else if (referred instanceof INodeDirectoryWithSnapshot) {
lastSnapshot = ((INodeDirectoryWithSnapshot) referred)
.getLastSnapshot();
} else if (referred.isDirectory()) {
DirectoryWithSnapshotFeature sf = referred.asDirectory()
.getDirectoryWithSnapshotFeature();
if (sf != null) {
lastSnapshot = sf.getLastSnapshot();
}
}
if (lastSnapshot != null && !lastSnapshot.equals(prior)) {
return lastSnapshot;

View File

@ -45,11 +45,10 @@ public class INodeSymlink extends INodeWithAdditionalFields {
}
@Override
INode recordModification(Snapshot latest, final INodeMap inodeMap)
throws QuotaExceededException {
INode recordModification(Snapshot latest) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
INodeDirectory parent = getParent();
parent.saveChild2Snapshot(this, latest, new INodeSymlink(this), inodeMap);
parent.saveChild2Snapshot(this, latest, new INodeSymlink(this));
}
return this;
}

View File

@ -231,13 +231,13 @@ final long getModificationTime(Snapshot snapshot) {
/** Update modification time if it is larger than the current value. */
@Override
public final INode updateModificationTime(long mtime, Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
public final INode updateModificationTime(long mtime, Snapshot latest)
throws QuotaExceededException {
Preconditions.checkState(isDirectory());
if (mtime <= modificationTime) {
return this;
}
return setModificationTime(mtime, latest, inodeMap);
return setModificationTime(mtime, latest);
}
final void cloneModificationTime(INodeWithAdditionalFields that) {

View File

@ -26,8 +26,8 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
@ -132,11 +132,11 @@ static INodesInPath resolve(final INodeDirectory startingDir,
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
final INodeDirectory dir = isDir? curNode.asDirectory(): null;
if (!isRef && isDir && dir instanceof INodeDirectoryWithSnapshot) {
if (!isRef && isDir && dir.isWithSnapshot()) {
//if the path is a non-snapshot path, update the latest snapshot.
if (!existing.isSnapshot()) {
existing.updateLatestSnapshot(
((INodeDirectoryWithSnapshot)dir).getLastSnapshot());
existing.updateLatestSnapshot(dir.getDirectoryWithSnapshotFeature()
.getLastSnapshot());
}
} else if (isRef && isDir && !lastComp) {
// If the curNode is a reference node, need to check its dstSnapshot:
@@ -155,10 +155,10 @@ static INodesInPath resolve(final INodeDirectory startingDir,
if (latest == null || // no snapshot in dst tree of rename
dstSnapshotId >= latest.getId()) { // the above scenario
Snapshot lastSnapshot = null;
if (curNode.isDirectory()
&& curNode.asDirectory() instanceof INodeDirectoryWithSnapshot) {
lastSnapshot = ((INodeDirectoryWithSnapshot) curNode
.asDirectory()).getLastSnapshot();
DirectoryWithSnapshotFeature sf = null;
if (curNode.isDirectory() &&
(sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
lastSnapshot = sf.getLastSnapshot();
}
existing.setSnapshot(lastSnapshot);
}
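The inline assignment in the new condition fetches the feature once and tests it in the same expression; an equivalent, more explicit form (a sketch reusing only names from the hunk above) is:

DirectoryWithSnapshotFeature sf = null;
if (curNode.isDirectory()) {
  // Fetch the optional snapshot feature from the directory inode.
  sf = curNode.asDirectory().getDirectoryWithSnapshotFeature();
}
if (sf != null) {
  lastSnapshot = sf.getLastSnapshot();
}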

View File

@@ -1008,6 +1008,7 @@ public DatanodeCommand cacheReport(DatanodeRegistration nodeReg,
public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
verifyRequest(nodeReg);
metrics.incrBlockReceivedAndDeletedOps();
if(blockStateChangeLog.isDebugEnabled()) {
blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+"from "+nodeReg+" "+receivedAndDeletedBlocks.length

View File

@@ -71,6 +71,8 @@ public class NameNodeMetrics {
MutableCounterLong listSnapshottableDirOps;
@Metric("Number of snapshotDiffReport operations")
MutableCounterLong snapshotDiffReportOps;
@Metric("Number of blockReceivedAndDeleted calls")
MutableCounterLong blockReceivedAndDeletedOps;
@Metric("Journal transactions") MutableRate transactions;
@Metric("Journal syncs") MutableRate syncs;
@@ -209,6 +211,10 @@ public void incrSnapshotDiffReportOps() {
snapshotDiffReportOps.incr();
}
public void incrBlockReceivedAndDeletedOps() {
blockReceivedAndDeletedOps.incr();
}
public void addTransaction(long latency) {
transactions.add(latency);
}
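End to end, the new counter follows the usual metrics2 flow: the @Metric field registers under the NameNodeActivity record, NameNodeRpcServer.blockReceivedAndDeleted bumps it, and tests read it back through MetricsAsserts. A short sketch of the test-side read, mirroring TestIncrementalBrVariations added later in this change:

// Read the counter, cause one more RPC, then assert it advanced by one.
long ops = getLongCounter("BlockReceivedAndDeletedOps",
    getMetrics("NameNodeActivity"));
// ... trigger one blockReceivedAndDeleted call ...
assertCounter("BlockReceivedAndDeletedOps", ops + 1,
    getMetrics("NameNodeActivity"));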

View File

@@ -98,7 +98,7 @@ final void setPosterior(D posterior) {
}
/** Save the INode state to the snapshot if it is not done already. */
void saveSnapshotCopy(A snapshotCopy, N currentINode) {
void saveSnapshotCopy(A snapshotCopy) {
Preconditions.checkState(snapshotINode == null, "Expected snapshotINode to be null");
snapshotINode = snapshotCopy;
}

View File

@@ -25,8 +25,8 @@
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
/**
@@ -271,7 +271,7 @@ final boolean changedBetweenSnapshots(Snapshot earlier, Snapshot later) {
* Note that the current inode is returned if there is no change
* between the given snapshot and the current state.
*/
A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
public A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
final D diff = getDiff(snapshot);
final A inode = diff == null? null: diff.getSnapshotINode();
return inode == null? currentINode: inode;
@@ -306,7 +306,7 @@ public void saveSelf2Snapshot(Snapshot latest, N currentINode, A snapshotCopy)
if (snapshotCopy == null) {
snapshotCopy = createSnapshotCopy(currentINode);
}
diff.saveSnapshotCopy(snapshotCopy, currentINode);
diff.saveSnapshotCopy(snapshotCopy);
}
}
}

View File

@@ -25,7 +25,6 @@
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
/**
@@ -57,10 +56,6 @@ public void deleteCurrentFile() {
isCurrentFileDeleted = true;
}
public INodeFileAttributes getSnapshotINode(INodeFile f, Snapshot snapshot) {
return diffs.getSnapshotINode(snapshot, f);
}
public FileDiffList getDiffs() {
return diffs;
}
@@ -90,7 +85,7 @@ public Quota.Counts cleanFile(final INodeFile file, final Snapshot snapshot,
if (snapshot == null) {
// delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) {
file.recordModification(prior, null);
file.recordModification(prior);
deleteCurrentFile();
}
collectBlocksAndClear(file, collectedBlocks, removedINodes);

View File

@@ -44,6 +44,8 @@
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.Time;
@@ -58,7 +60,7 @@
* by the namesystem and FSDirectory locks.
*/
@InterfaceAudience.Private
public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
public class INodeDirectorySnapshottable extends INodeDirectory {
/** Limit the number of snapshot per snapshottable directory. */
static final int SNAPSHOT_LIMIT = 1 << 16;
@@ -115,8 +117,8 @@ public int compare(INode left, INode right) {
* the two snapshots, while its associated value is a {@link ChildrenDiff}
* storing the changes (creation/deletion) happened to the children (files).
*/
private final Map<INodeDirectoryWithSnapshot, ChildrenDiff> dirDiffMap =
new HashMap<INodeDirectoryWithSnapshot, ChildrenDiff>();
private final Map<INodeDirectory, ChildrenDiff> dirDiffMap =
new HashMap<INodeDirectory, ChildrenDiff>();
SnapshotDiffInfo(INodeDirectorySnapshottable snapshotRoot, Snapshot start,
Snapshot end) {
@@ -126,8 +128,8 @@ public int compare(INode left, INode right) {
}
/** Add a dir-diff pair */
private void addDirDiff(INodeDirectoryWithSnapshot dir,
byte[][] relativePath, ChildrenDiff diff) {
private void addDirDiff(INodeDirectory dir, byte[][] relativePath,
ChildrenDiff diff) {
dirDiffMap.put(dir, diff);
diffMap.put(dir, relativePath);
}
@@ -154,8 +156,7 @@ public SnapshotDiffReport generateReport() {
if (node.isDirectory()) {
ChildrenDiff dirDiff = dirDiffMap.get(node);
List<DiffReportEntry> subList = dirDiff.generateReport(
diffMap.get(node), (INodeDirectoryWithSnapshot) node,
isFromEarlier());
diffMap.get(node), isFromEarlier());
diffReportList.addAll(subList);
}
}
@@ -183,8 +184,11 @@ ReadOnlyList<Snapshot> getSnapshotsByNames() {
private int snapshotQuota = SNAPSHOT_LIMIT;
public INodeDirectorySnapshottable(INodeDirectory dir) {
super(dir, true, dir instanceof INodeDirectoryWithSnapshot ?
((INodeDirectoryWithSnapshot) dir).getDiffs(): null);
super(dir, true, true);
// add snapshot feature if the original directory does not have it
if (!isWithSnapshot()) {
addSnapshotFeature(null);
}
}
/** @return the number of existing snapshots. */
@@ -298,8 +302,8 @@ Snapshot addSnapshot(int id, String name) throws SnapshotException,
snapshotsByNames.add(-i - 1, s);
//set modification time
updateModificationTime(Time.now(), null, null);
s.getRoot().setModificationTime(getModificationTime(), null, null);
updateModificationTime(Time.now(), null);
s.getRoot().setModificationTime(getModificationTime(), null);
return s;
}
@@ -413,12 +417,12 @@ private void computeDiffRecursively(INode node, List<byte[]> parentPath,
byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
if (node.isDirectory()) {
INodeDirectory dir = node.asDirectory();
if (dir instanceof INodeDirectoryWithSnapshot) {
INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
boolean change = sdir.computeDiffBetweenSnapshots(
diffReport.from, diffReport.to, diff);
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
boolean change = sf.computeDiffBetweenSnapshots(diffReport.from,
diffReport.to, diff, dir);
if (change) {
diffReport.addDirDiff(sdir, relativePath, diff);
diffReport.addDirDiff(dir, relativePath, diff);
}
}
ReadOnlyList<INode> children = dir.getChildrenList(diffReport
@@ -453,13 +457,15 @@ private void computeDiffRecursively(INode node, List<byte[]> parentPath,
INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
throws QuotaExceededException {
if (latest == null) {
Preconditions.checkState(getLastSnapshot() == null,
Preconditions.checkState(
getDirectoryWithSnapshotFeature().getLastSnapshot() == null,
"latest == null but getLastSnapshot() != null, this=%s", this);
return replaceSelf4INodeDirectory(inodeMap);
} else {
return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
.recordModification(latest, null);
}
INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
if (latest != null) {
dir.recordModification(latest);
}
return dir;
}
@Override

View File

@@ -117,9 +117,8 @@ public static Snapshot findLatestSnapshot(INode inode, Snapshot anchor) {
for(; inode != null; inode = inode.getParent()) {
if (inode.isDirectory()) {
final INodeDirectory dir = inode.asDirectory();
if (dir instanceof INodeDirectoryWithSnapshot) {
latest = ((INodeDirectoryWithSnapshot) dir).getDiffs().updatePrior(
anchor, latest);
if (dir.isWithSnapshot()) {
latest = dir.getDiffs().updatePrior(anchor, latest);
}
}
}

View File

@@ -36,8 +36,8 @@
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -91,8 +91,7 @@ void saveINodeDiffs(final AbstractINodeDiffList<N, A, D> diffs,
public static void saveDirectoryDiffList(final INodeDirectory dir,
final DataOutput out, final ReferenceMap referenceMap
) throws IOException {
saveINodeDiffs(dir instanceof INodeDirectoryWithSnapshot?
((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out, referenceMap);
saveINodeDiffs(dir.getDiffs(), out, referenceMap);
}
public static void saveFileDiffList(final INodeFile file,
@@ -139,7 +138,7 @@ private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
* @return The created node.
*/
private static INode loadCreated(byte[] createdNodeName,
INodeDirectoryWithSnapshot parent) throws IOException {
INodeDirectory parent) throws IOException {
// the INode in the created list should be a reference to another INode
// in posterior SnapshotDiffs or one of the current children
for (DirectoryDiff postDiff : parent.getDiffs()) {
@@ -165,7 +164,7 @@ private static INode loadCreated(byte[] createdNodeName,
* @param in The {@link DataInput} to read.
* @return The created list.
*/
private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
private static List<INode> loadCreatedList(INodeDirectory parent,
DataInput in) throws IOException {
// read the size of the created list
int createdSize = in.readInt();
@@ -188,7 +187,7 @@ private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
* @param loader The {@link Loader} instance.
* @return The deleted list.
*/
private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
private static List<INode> loadDeletedList(INodeDirectory parent,
List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
throws IOException {
int deletedSize = in.readInt();
@@ -239,11 +238,10 @@ public static void loadSnapshotList(
public static void loadDirectoryDiffList(INodeDirectory dir,
DataInput in, FSImageFormat.Loader loader) throws IOException {
final int size = in.readInt();
if (dir instanceof INodeDirectoryWithSnapshot) {
INodeDirectoryWithSnapshot withSnapshot = (INodeDirectoryWithSnapshot)dir;
DirectoryDiffList diffs = withSnapshot.getDiffs();
if (dir.isWithSnapshot()) {
DirectoryDiffList diffs = dir.getDiffs();
for (int i = 0; i < size; i++) {
diffs.addFirst(loadDirectoryDiff(withSnapshot, in, loader));
diffs.addFirst(loadDirectoryDiff(dir, in, loader));
}
}
}
@@ -277,9 +275,8 @@ private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff(
* using.
* @return A {@link DirectoryDiff}.
*/
private static DirectoryDiff loadDirectoryDiff(
INodeDirectoryWithSnapshot parent, DataInput in,
FSImageFormat.Loader loader) throws IOException {
private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
DataInput in, FSImageFormat.Loader loader) throws IOException {
// 1. Read the full path of the Snapshot root to identify the Snapshot
final Snapshot snapshot = loader.getSnapshot(in);

View File

@@ -2143,17 +2143,14 @@ public File getInstanceStorageDir(int dnIndex, int dirIndex) {
}
/**
* Get a storage directory for a datanode. There are two storage directories
* per datanode:
* Get a storage directory for a datanode.
* <ol>
* <li><base directory>/data/data<2*dnIndex + 1></li>
* <li><base directory>/data/data<2*dnIndex + 2></li>
* </ol>
*
* @param dnIndex datanode index (starts from 0)
* @param dirIndex directory index (0 or 1). Index 0 provides access to the
* first storage directory. Index 1 provides access to the second
* storage directory.
* @param dirIndex directory index.
* @return Storage directory
*/
public static File getStorageDir(int dnIndex, int dirIndex) {
@@ -2164,7 +2161,7 @@ public static File getStorageDir(int dnIndex, int dirIndex) {
* Calculate the DN instance-specific path for appending to the base dir
* to determine the location of the storage of a DN instance in the mini cluster
* @param dnIndex datanode index
* @param dirIndex directory index (0 or 1).
* @param dirIndex directory index.
* @return the DN instance-specific storage directory path
*/
private static String getStorageDirPath(int dnIndex, int dirIndex) {
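For reference, under the historical layout of two directories per datanode the index arithmetic works out as follows; this is an illustrative sketch (the helper name is hypothetical, not MiniDFSCluster code):

// Hypothetical helper showing the dnIndex/dirIndex -> name mapping.
static String storageDirName(int dnIndex, int dirIndex) {
  // Two dirs per DN: dn 0 -> data1, data2; dn 1 -> data3, data4; ...
  return "data" + (2 * dnIndex + dirIndex + 1);
}
// storageDirName(0, 0) == "data1", storageDirName(1, 1) == "data4"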

View File

@@ -71,7 +71,15 @@
/**
* This test simulates a variety of situations when blocks are being
* intentionally corrupted, unexpectedly modified, and so on before a block
* report is happening
* report is happening.
*
* For each test case it runs two variations:
* #1 - For a given DN, the first variation sends block reports for all
* storages in a single call to the NN.
* #2 - For a given DN, the second variation sends block reports for each
* storage in a separate call.
*
* The behavior should be the same in either variation.
*/
public class TestBlockReport {
public static final Log LOG = LogFactory.getLog(TestBlockReport.class);
@@ -157,6 +165,113 @@ private static StorageBlockReport[] getBlockReports(
return reports;
}
/**
* Utility routine to send block reports to the NN, either in a single call
* or reporting one storage per call.
*
* @param dnR the registration of the datanode sending the reports
* @param poolId the block pool ID
* @param reports the per-storage block reports to send
* @param needtoSplit if true, send one report per storage in separate calls;
*          if false, send all reports in a single call
* @throws IOException if the block report RPC to the NameNode fails
*/
private void sendBlockReports(DatanodeRegistration dnR, String poolId,
StorageBlockReport[] reports, boolean needtoSplit) throws IOException {
if (!needtoSplit) {
LOG.info("Sending combined block reports for " + dnR);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
} else {
for (StorageBlockReport report : reports) {
LOG.info("Sending block report for storage " + report.getStorage().getStorageID());
StorageBlockReport[] singletonReport = { report };
cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport);
}
}
}
/**
* Test variations blockReport_01 through blockReport_09 with combined
* and split block reports.
*/
@Test
public void blockReportCombined_01() throws IOException {
blockReport_01(false);
}
@Test
public void blockReportSplit_01() throws IOException {
blockReport_01(true);
}
@Test
public void blockReportCombined_02() throws IOException {
blockReport_02(false);
}
@Test
public void blockReportSplit_02() throws IOException {
blockReport_02(true);
}
@Test
public void blockReportCombined_03() throws IOException {
blockReport_03(false);
}
@Test
public void blockReportSplit_03() throws IOException {
blockReport_03(true);
}
@Test
public void blockReportCombined_04() throws IOException {
blockReport_04(false);
}
@Test
public void blockReportSplit_04() throws IOException {
blockReport_04(true);
}
@Test
public void blockReportCombined_06() throws Exception {
blockReport_06(false);
}
@Test
public void blockReportSplit_06() throws Exception {
blockReport_06(true);
}
@Test
public void blockReportCombined_07() throws Exception {
blockReport_07(false);
}
@Test
public void blockReportSplit_07() throws Exception {
blockReport_07(true);
}
@Test
public void blockReportCombined_08() throws Exception {
blockReport_08(false);
}
@Test
public void blockReportSplit_08() throws Exception {
blockReport_08(true);
}
@Test
public void blockReportCombined_09() throws Exception {
blockReport_09(false);
}
@Test
public void blockReportSplit_09() throws Exception {
blockReport_09(true);
}
/**
* Tests writing a file, verifying and closing it. Then the lengths of the
* blocks are messed up and a block report is forced.
@@ -164,8 +279,7 @@ private static StorageBlockReport[] getBlockReports(
*
* @throws java.io.IOException on an error
*/
@Test
public void blockReport_01() throws IOException {
private void blockReport_01(boolean splitBlockReports) throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
@@ -198,7 +312,7 @@ public void blockReport_01() throws IOException {
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
List<LocatedBlock> blocksAfterReport =
DFSTestUtil.getAllBlocks(fs.open(filePath));
@@ -224,8 +338,7 @@ public void blockReport_01() throws IOException {
*
* @throws IOException in case of errors
*/
@Test
public void blockReport_02() throws IOException {
private void blockReport_02(boolean splitBlockReports) throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
LOG.info("Running test " + METHOD_NAME);
@@ -280,7 +393,7 @@ public void blockReport_02() throws IOException {
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn0, poolId, false, false);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
.getBlockManager());
@@ -301,8 +414,7 @@ public void blockReport_02() throws IOException {
*
* @throws IOException in case of an error
*/
@Test
public void blockReport_03() throws IOException {
private void blockReport_03(boolean splitBlockReports) throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
@@ -312,11 +424,7 @@ public void blockReport_03() throws IOException {
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
DatanodeCommand dnCmd =
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
if(LOG.isDebugEnabled()) {
LOG.debug("Got the command: " + dnCmd);
}
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertThat("Wrong number of corrupt blocks",
@@ -333,8 +441,7 @@ public void blockReport_03() throws IOException {
*
* @throws IOException in case of an error
*/
@Test
public void blockReport_04() throws IOException {
private void blockReport_04(boolean splitBlockReports) throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs, filePath,
@@ -352,11 +459,7 @@ public void blockReport_04() throws IOException {
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
DatanodeCommand dnCmd =
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
if(LOG.isDebugEnabled()) {
LOG.debug("Got the command: " + dnCmd);
}
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertThat("Wrong number of corrupt blocks",
@@ -373,8 +476,7 @@ public void blockReport_04() throws IOException {
*
* @throws IOException in case of an error
*/
@Test
public void blockReport_06() throws Exception {
private void blockReport_06(boolean splitBlockReports) throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -387,7 +489,7 @@ public void blockReport_06() throws Exception {
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertEquals("Wrong number of PendingReplication Blocks",
0, cluster.getNamesystem().getUnderReplicatedBlocks());
@@ -406,8 +508,7 @@ public void blockReport_06() throws Exception {
*
* @throws IOException in case of an error
*/
@Test
public void blockReport_07() throws Exception {
private void blockReport_07(boolean splitBlockReports) throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -421,7 +522,7 @@ public void blockReport_07() throws Exception {
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertThat("Wrong number of corrupt blocks",
@@ -432,7 +533,7 @@ public void blockReport_07() throws Exception {
cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
reports = getBlockReports(dn, poolId, true, true);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertThat("Wrong number of corrupt blocks",
@@ -458,8 +559,7 @@ public void blockReport_07() throws Exception {
*
* @throws IOException in case of an error
*/
@Test
public void blockReport_08() throws IOException {
private void blockReport_08(boolean splitBlockReports) throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -483,8 +583,8 @@ public void blockReport_08() throws IOException {
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] report = getBlockReports(dn, poolId, false, false);
cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertEquals("Wrong number of PendingReplication blocks",
blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
@@ -500,8 +600,7 @@ public void blockReport_08() throws IOException {
// Similar to BlockReport_08 but corrupts GS and len of the TEMPORARY's
// replica block. Expect the same behaviour: NN should simply ignore this
// block
@Test
public void blockReport_09() throws IOException {
private void blockReport_09(boolean splitBlockReports) throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -526,8 +625,8 @@ public void blockReport_09() throws IOException {
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] report = getBlockReports(dn, poolId, true, true);
cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
sendBlockReports(dnR, poolId, reports, splitBlockReports);
printStats();
assertEquals("Wrong number of PendingReplication blocks",
2, cluster.getNamesystem().getPendingReplicationBlocks());

View File

@@ -0,0 +1,87 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.junit.Test;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.*;
/**
* Test to verify that the DataNode Uuid is correctly initialized before
* FsDataSet initialization.
*/
public class TestDataNodeInitStorage {
public static final Log LOG = LogFactory.getLog(TestDataNodeInitStorage.class);
static private class SimulatedFsDatasetVerifier extends SimulatedFSDataset {
static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
@Override
public SimulatedFsDatasetVerifier newInstance(
DataNode datanode, DataStorage storage,
Configuration conf) throws IOException {
return new SimulatedFsDatasetVerifier(storage, conf);
}
@Override
public boolean isSimulated() {
return true;
}
}
public static void setFactory(Configuration conf) {
conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
Factory.class.getName());
}
// This constructor does the actual verification by ensuring that
// the DatanodeUuid is initialized.
public SimulatedFsDatasetVerifier(DataStorage storage, Configuration conf) {
super(storage, conf);
LOG.info("Assigned DatanodeUuid is " + storage.getDatanodeUuid());
assert(storage.getDatanodeUuid() != null);
assert(storage.getDatanodeUuid().length() != 0);
}
}
@Test (timeout = 60000)
public void testDataNodeInitStorage() throws Throwable {
// Create configuration to use SimulatedFsDatasetVerifier#Factory.
Configuration conf = new HdfsConfiguration();
SimulatedFsDatasetVerifier.setFactory(conf);
// Start a cluster so that SimulatedFsDatasetVerifier constructor is
// invoked.
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
cluster.shutdown();
}
}
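The injection hook used by this test is the generic FsDataset factory key, so any FsDatasetSpi implementation can be swapped in the same way; for example, SimulatedFSDataset ships its own Factory in the same test package. A minimal sketch, assuming the standard helper names:

// Install a custom FsDatasetSpi.Factory before starting the cluster.
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
    SimulatedFSDataset.Factory.class.getName());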

View File

@@ -0,0 +1,213 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This test verifies that incremental block reports from a single DataNode are
* correctly handled by NN. Tests the following variations:
* #1 - Incremental BRs from all storages combined in a single call.
* #2 - Incremental BRs from separate storages sent in separate calls.
*
* We also verify that the DataNode is not splitting the reports (it may do so
* in the future).
*/
public class TestIncrementalBrVariations {
public static final Log LOG = LogFactory.getLog(TestIncrementalBrVariations.class);
private static short NUM_DATANODES = 1;
static final int BLOCK_SIZE = 1024;
static final int NUM_BLOCKS = 10;
private static final long seed = 0xFACEFEEDL;
private static final String NN_METRICS = "NameNodeActivity";
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
private static Configuration conf;
static {
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) BlockManager.blockLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) TestIncrementalBrVariations.LOG).getLogger().setLevel(Level.ALL);
}
@Before
public void startUpCluster() throws IOException {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
fs = cluster.getFileSystem();
client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
cluster.getConfiguration(0));
}
@After
public void shutDownCluster() throws IOException {
client.close();
fs.close();
cluster.shutdownDataNodes();
cluster.shutdown();
}
/**
* Incremental BRs from all storages combined in a single message.
*/
@Test
public void testCombinedIncrementalBlockReport() throws IOException {
verifyIncrementalBlockReports(false);
}
/**
* One incremental BR per storage.
*/
@Test
public void testSplitIncrementalBlockReport() throws IOException {
verifyIncrementalBlockReports(true);
}
private LocatedBlocks createFileGetBlocks(String filenamePrefix) throws IOException {
Path filePath = new Path("/" + filenamePrefix + ".dat");
// Write out a file with a few blocks, get block locations.
DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS,
BLOCK_SIZE, NUM_DATANODES, seed);
// Get the block list for the file with the block locations.
LocatedBlocks blocks = client.getLocatedBlocks(
filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
assertThat(cluster.getNamesystem().getUnderReplicatedBlocks(), is(0L));
return blocks;
}
public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
// Get the block list for the file with the block locations.
LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
// All blocks belong to the same file, hence the same block pool.
DataNode dn = cluster.getDataNodes().get(0);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
// We will send 'fake' incremental block reports to the NN that look
// like they originated from DN 0.
StorageReceivedDeletedBlocks reports[] =
new StorageReceivedDeletedBlocks[dn.getFSDataset().getVolumes().size()];
// Lie to the NN that one block on each storage has been deleted.
for (int i = 0; i < reports.length; ++i) {
FsVolumeSpi volume = dn.getFSDataset().getVolumes().get(i);
boolean foundBlockOnStorage = false;
ReceivedDeletedBlockInfo rdbi[] = new ReceivedDeletedBlockInfo[1];
// Find the first block on this storage and mark it as deleted for the
// report.
for (LocatedBlock block : blocks.getLocatedBlocks()) {
if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
rdbi[0] = new ReceivedDeletedBlockInfo(block.getBlock().getLocalBlock(),
ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
foundBlockOnStorage = true;
break;
}
}
assertTrue(foundBlockOnStorage);
reports[i] = new StorageReceivedDeletedBlocks(volume.getStorageID(), rdbi);
if (splitReports) {
// If we are splitting reports then send the report for this storage now.
StorageReceivedDeletedBlocks singletonReport[] = { reports[i] };
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, singletonReport);
}
}
if (!splitReports) {
// Send a combined report.
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, reports);
}
// Make sure that the deleted block from each storage was picked up
// by the NameNode.
assertThat(cluster.getNamesystem().getMissingBlocksCount(), is((long) reports.length));
}
/**
* Verify that the DataNode sends a single incremental block report for all
* storages.
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout=60000)
public void testDataNodeDoesNotSplitReports()
throws IOException, InterruptedException {
LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
assertThat(cluster.getDataNodes().size(), is(1));
DataNode dn = cluster.getDataNodes().get(0);
// Remove all blocks from the DataNode.
for (LocatedBlock block : blocks.getLocatedBlocks()) {
dn.notifyNamenodeDeletedBlock(
block.getBlock(), block.getStorageIDs()[0]);
}
LOG.info("Triggering report after deleting blocks");
long ops = getLongCounter("BlockReceivedAndDeletedOps", getMetrics(NN_METRICS));
// Trigger a report to the NameNode and give it a few seconds.
DataNodeTestUtils.triggerBlockReport(dn);
Thread.sleep(5000);
// Ensure that NameNodeRpcServer.blockReceivedAndDeleted is invoked
// exactly once after we triggered the report.
assertCounter("BlockReceivedAndDeletedOps", ops+1, getMetrics(NN_METRICS));
}
}
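One possible hardening of the last test, sketched here as an alternative rather than what the patch does: replace the fixed five-second sleep with a bounded poll via GenericTestUtils.waitFor (already imported above; the sketch also assumes com.google.common.base.Supplier is imported and TimeoutException is declared):

final long ops = getLongCounter("BlockReceivedAndDeletedOps", getMetrics(NN_METRICS));
DataNodeTestUtils.triggerBlockReport(dn);
// Poll until the counter advances instead of sleeping a fixed interval.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    return getLongCounter("BlockReceivedAndDeletedOps",
        getMetrics(NN_METRICS)) == ops + 1;
  }
}, 500, 30000);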

View File

@@ -41,8 +41,8 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.log4j.Level;

View File

@@ -781,7 +781,7 @@ private INode createTreeOfInodes(String path) throws QuotaExceededException {
}
System.out.println("Adding component " + DFSUtil.bytes2String(component));
dir = new INodeDirectory(++id, component, permstatus, 0);
prev.addChild(dir, false, null, null);
prev.addChild(dir, false, null);
prev = dir;
}
return dir; // Last Inode in the chain

View File

@@ -31,7 +31,6 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -206,8 +205,7 @@ public void testSnapshotPathINodes() throws Exception {
// Check the INode for file1 (snapshot file)
INode snapshotFileNode = inodes[inodes.length - 1];
assertINodeFile(snapshotFileNode, file1);
assertTrue(snapshotFileNode.getParent() instanceof
INodeDirectoryWithSnapshot);
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Call getExistingPathINodes and request only one INode.
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);

View File

@@ -44,7 +44,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;

View File

@@ -358,7 +358,7 @@ public void testDisallowNestedSnapshottableDir() throws Exception {
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
INode subNode = fsdir.getINode(sub.toString());
assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
assertTrue(subNode.asDirectory().isWithSnapshot());
hdfs.allowSnapshot(sub);
subNode = fsdir.getINode(sub.toString());
@@ -366,6 +366,6 @@ public void testDisallowNestedSnapshottableDir() throws Exception {
hdfs.disallowSnapshot(sub);
subNode = fsdir.getINode(sub.toString());
assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
assertTrue(subNode.asDirectory().isWithSnapshot());
}
}

View File

@@ -59,12 +59,11 @@
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.test.GenericTestUtils;
@@ -757,10 +756,10 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
// only 2 references: one in deleted list of sdir1, one in created list of
// sdir1
assertEquals(2, fooWithCount.getReferenceCount());
INodeDirectoryWithSnapshot foo = (INodeDirectoryWithSnapshot) fooWithCount
.asDirectory();
INodeDirectory foo = fooWithCount.asDirectory();
assertEquals(1, foo.getDiffs().asList().size());
assertEquals("s1", foo.getLastSnapshot().getRoot().getLocalName());
assertEquals("s1", foo.getDirectoryWithSnapshotFeature().getLastSnapshot()
.getRoot().getLocalName());
INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1, bar1.getDiffs().asList().size());
assertEquals("s1", bar1.getDiffs().getLastSnapshot().getRoot()
@@ -973,8 +972,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
// 5 references: s1, s22, s333, s2222, current tree of sdir1
assertEquals(5, fooWithCount.getReferenceCount());
INodeDirectoryWithSnapshot foo = (INodeDirectoryWithSnapshot) fooWithCount
.asDirectory();
INodeDirectory foo = fooWithCount.asDirectory();
List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
assertEquals(4, fooDiffs.size());
assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
@@ -1032,7 +1030,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
fooRef = fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount = (WithCount) fooRef.getReferredINode();
assertEquals(4, fooWithCount.getReferenceCount());
foo = (INodeDirectoryWithSnapshot) fooWithCount.asDirectory();
foo = fooWithCount.asDirectory();
fooDiffs = foo.getDiffs().asList();
assertEquals(4, fooDiffs.size());
assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
@@ -1171,8 +1169,7 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception {
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
assertEquals(1, fooWC.getReferenceCount());
INodeDirectoryWithSnapshot fooDir = (INodeDirectoryWithSnapshot) fooWC
.getReferredINode().asDirectory();
INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
assertEquals(1, diffs.size());
assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
@@ -1263,7 +1260,7 @@ public void testRenameUndo_1() throws Exception {
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
(Snapshot) anyObject(), (INodeMap) anyObject());
(Snapshot) anyObject());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
@@ -1288,9 +1285,8 @@ public void testRenameUndo_1() throws Exception {
assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
.getDiffs().asList();
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
@@ -1302,7 +1298,7 @@ public void testRenameUndo_1() throws Exception {
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
@@ -1331,7 +1327,7 @@ public void testRenameUndo_2() throws Exception {
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
(Snapshot) anyObject(), (INodeMap) anyObject());
(Snapshot) anyObject());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
@@ -1366,7 +1362,7 @@ public void testRenameUndo_2() throws Exception {
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
@@ -1393,7 +1389,7 @@ public void testRenameUndo_3() throws Exception {
INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3 = spy(dir3);
doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
(Snapshot) anyObject(), (INodeMap) anyObject());
(Snapshot) anyObject());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
@@ -1420,8 +1416,7 @@ public void testRenameUndo_3() throws Exception {
INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
assertTrue(fooNode instanceof INodeReference.DstReference);
List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode
.asDirectory()).getDiffs().asList();
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
@@ -1455,8 +1450,7 @@ public void testRenameUndo_3() throws Exception {
assertTrue(hdfs.exists(foo_s3));
assertTrue(fooNode instanceof INodeReference.DstReference);
fooDiffs = ((INodeDirectoryWithSnapshot) fooNode.asDirectory()).getDiffs()
.asList();
fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(2, fooDiffs.size());
assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
assertEquals("s3", fooDiffs.get(1).snapshot.getRoot().getLocalName());
@@ -1495,10 +1489,9 @@ public void testRenameUndo_4() throws Exception {
INodeDirectory mockDir3 = spy(dir3);
// fail the rename but succeed in undo
doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
anyBoolean(), (Snapshot) anyObject(), (INodeMap) anyObject());
Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(),
anyBoolean(), (Snapshot) anyObject(),
(INodeMap) anyObject())).thenReturn(false).thenCallRealMethod();
anyBoolean(), (Snapshot) anyObject());
Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
(Snapshot) anyObject())).thenReturn(false).thenCallRealMethod();
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
foo3Node.setParent(mockDir3);
@@ -1561,7 +1554,7 @@ public void testRenameUndo_5() throws Exception {
.getChildrenList(null));
assertEquals(1, childrenList.size());
INode fooNode = childrenList.get(0);
assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
assertTrue(fooNode.asDirectory().isWithSnapshot());
INode barNode = fsdir.getINode4Write(bar.toString());
assertTrue(barNode.getClass() == INodeFile.class);
assertSame(fooNode, barNode.getParent());
@@ -1637,7 +1630,7 @@ public void testRenameUndo_6() throws Exception {
.getChildrenList(null));
assertEquals(1, childrenList.size());
INode fooNode = childrenList.get(0);
assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
assertTrue(fooNode.asDirectory().isWithSnapshot());
assertSame(dir1Node, fooNode.getParent());
List<DirectoryDiff> diffList = ((INodeDirectorySnapshottable) dir1Node)
.getDiffs().asList();
@@ -1656,7 +1649,7 @@ public void testRenameUndo_6() throws Exception {
.getChildrenList(null));
assertEquals(1, childrenList.size());
INode subdir2Node = childrenList.get(0);
assertTrue(subdir2Node.getClass() == INodeDirectoryWithSnapshot.class);
assertTrue(subdir2Node.asDirectory().isWithSnapshot());
assertSame(dir2Node, subdir2Node.getParent());
assertSame(subdir2Node, fsdir.getINode4Write(sub_dir2.toString()));
INode subsubdir2Node = fsdir.getINode4Write(subsub_dir2.toString());
@@ -1669,7 +1662,7 @@ public void testRenameUndo_6() throws Exception {
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
diffList = ((INodeDirectoryWithSnapshot) subdir2Node).getDiffs().asList();
diffList = subdir2Node.asDirectory().getDiffs().asList();
assertEquals(0, diffList.size());
}
@@ -1697,8 +1690,7 @@ public void testRenameUndo_7() throws Exception {
}
// check
INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
.getINode4Write(foo.toString());
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList<INode> children = fooNode.getChildrenList(null);
assertEquals(1, children.size());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
@@ -1948,8 +1940,7 @@ public void testRenameDirAndDeleteSnapshot_3() throws Exception {
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectoryWithSnapshot fooNode =
(INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode.getChildrenList(null);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
@@ -2017,8 +2008,7 @@ public void testRenameDirAndDeleteSnapshot_4() throws Exception {
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectoryWithSnapshot fooNode =
(INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode.getChildrenList(null);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
@@ -2044,9 +2034,9 @@
/**
* This test demonstrates that
* {@link INodeDirectoryWithSnapshot#removeChild(INode, Snapshot, INodeMap)}
* {@link INodeDirectory#removeChild(INode, Snapshot)}
* and
* {@link INodeDirectoryWithSnapshot#addChild(INode, boolean, Snapshot, INodeMap)}
* {@link INodeDirectory#addChild(INode, boolean, Snapshot)}
* should use {@link INode#isInLatestSnapshot(Snapshot)} to check if the
* added/removed child should be recorded in snapshots.
*/
@@ -2063,7 +2053,7 @@ public void testRenameDirAndDeleteSnapshot_5() throws Exception {
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
final Path bar = new Path(foo, "bar");
// create file bar, and foo will become an INodeDirectoryWithSnapshot
// create file bar, and foo will become an INodeDirectory with snapshot
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
// delete snapshot s1. now foo is not in any snapshot
hdfs.deleteSnapshot(dir1, "s1");
@@ -2079,7 +2069,7 @@ public void testRenameDirAndDeleteSnapshot_5() throws Exception {
// delete /dir2/foo. Since it is not in any snapshot, we will call its
// destroy function. If we do not use isInLatestSnapshot in removeChild and
// addChild methods in INodeDirectoryWithSnapshot, the file bar will be
// addChild methods in INodeDirectory (with snapshot), the file bar will be
// stored in the deleted list of foo, and will be destroyed.
hdfs.delete(foo2, true);
@@ -2130,8 +2120,8 @@ public void testRenameDirAndDeleteSnapshot_6() throws Exception {
// check the internal
assertFalse("after deleting s0, " + foo_s0 + " should not exist",
hdfs.exists(foo_s0));
INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
.getINode4Write(dir2.toString());
INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
.asDirectory();
assertTrue("the diff list of " + dir2
+ " should be empty after deleting s0", dir2Node.getDiffs().asList()
.isEmpty());
@@ -2140,16 +2130,14 @@ public void testRenameDirAndDeleteSnapshot_6() throws Exception {
INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
assertTrue(fooRefNode instanceof INodeReference.DstReference);
INodeDirectory fooNode = fooRefNode.asDirectory();
// fooNode should be still INodeDirectoryWithSnapshot since we call
// fooNode should be still INodeDirectory (With Snapshot) since we call
// recordModification before the rename
assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
assertTrue(((INodeDirectoryWithSnapshot) fooNode).getDiffs().asList()
.isEmpty());
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.getDiffs().asList().isEmpty());
INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory();
// bar should also be an INodeDirectoryWithSnapshot, and both of its diff
// bar should also be INodeDirectory (With Snapshot), and both of its diff
// list and children list are empty
assertTrue(((INodeDirectoryWithSnapshot) barNode).getDiffs().asList()
.isEmpty());
assertTrue(barNode.getDiffs().asList().isEmpty());
assertTrue(barNode.getChildrenList(null).isEmpty());
restartClusterAndCheckImage(true);
@@ -2199,8 +2187,8 @@ public void testRenameDirAndDeleteSnapshot_7() throws Exception {
assertTrue(hdfs.exists(file_s0));
// check dir1: foo should be in the created list of s0
INodeDirectoryWithSnapshot dir1Node = (INodeDirectoryWithSnapshot) fsdir
.getINode4Write(dir1.toString());
INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
.asDirectory();
List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
assertEquals(1, dir1DiffList.size());
List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
@@ -2215,8 +2203,8 @@ public void testRenameDirAndDeleteSnapshot_7() throws Exception {
// check foo and its subtree
final Path newbar = new Path(newfoo, bar.getName());
INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
.getINode4Write(newbar.toString());
INodeDirectory barNode = fsdir.getINode4Write(newbar.toString())
.asDirectory();
assertSame(fooNode.asDirectory(), barNode.getParent());
// bar should only have a snapshot diff for s0
List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
@@ -2229,8 +2217,8 @@ public void testRenameDirAndDeleteSnapshot_7() throws Exception {
// check dir2: a WithName instance for foo should be in the deleted list
// of the snapshot diff for s2
INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
.getINode4Write(dir2.toString());
INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
.asDirectory();
List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
// dir2Node should contain 2 snapshot diffs, one for s2, and the other was
// originally s1 (created when dir2 was transformed to a snapshottable dir),
@@ -2287,8 +2275,7 @@ public void testCleanDstReference() throws Exception {
// make sure the file under bar is deleted
final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
"foo/bar");
INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
.getINode(barInS0.toString());
INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0, barNode.getChildrenList(null).size());
List<DirectoryDiff> diffList = barNode.getDiffs().asList();
assertEquals(1, diffList.size());

View File

@@ -36,7 +36,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.junit.After;
import org.junit.Before;
@@ -92,12 +92,12 @@ public void testSetQuota() throws Exception {
INodeDirectory subNode = INodeDirectory.valueOf(
fsdir.getINode(sub.toString()), sub);
// subNode should be an INodeDirectory, but not have the snapshot feature
assertFalse(subNode instanceof INodeDirectoryWithSnapshot);
assertFalse(subNode.isWithSnapshot());
hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
assertTrue(subNode.isQuotaSet());
assertFalse(subNode instanceof INodeDirectoryWithSnapshot);
assertFalse(subNode.isWithSnapshot());
}
/**
@ -150,8 +150,8 @@ public void testClearQuota() throws Exception {
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
INode subNode = fsdir.getINode4Write(subDir.toString());
assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
List<DirectoryDiff> diffList = ((INodeDirectoryWithSnapshot) subNode).getDiffs().asList();
assertTrue(subNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
assertEquals(1, diffList.size());
assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot));
List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
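The created list asserted on here is one side of a two-list children diff (hdfs.util.Diff's ListType.CREATED/DELETED). A stripped-down sketch of that structure, assuming a generic payload; the real ChildrenDiff inside DirectoryWithSnapshotFeature is considerably richer:

import java.util.ArrayList;
import java.util.List;

// Two-list diff sketch; the real ChildrenDiff also tracks prior state and
// quota usage.
class ChildrenDiffSketch<T> {
  enum ListType { CREATED, DELETED }

  private final List<T> created = new ArrayList<T>();
  private final List<T> deleted = new ArrayList<T>();

  List<T> getList(ListType type) {
    return type == ListType.CREATED ? created : deleted;
  }

  void create(T item) { created.add(item); }
  void delete(T item) { deleted.add(item); }
}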

View File

@ -51,7 +51,7 @@
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
@ -311,9 +311,9 @@ public void testDeleteCurrentFileDirectory() throws Exception {
// make sure the whole subtree of sub is stored correctly in snapshot
Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
sub.getName());
INodeDirectoryWithSnapshot snapshotNode4Sub =
(INodeDirectoryWithSnapshot) fsdir.getINode(snapshotSub.toString());
assertEquals(INodeDirectoryWithSnapshot.class, snapshotNode4Sub.getClass());
INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString())
.asDirectory();
assertTrue(snapshotNode4Sub.isWithSnapshot());
// the snapshot copy of sub has only one child subsub.
// newFile should have been destroyed
assertEquals(1, snapshotNode4Sub.getChildrenList(null).size());
@ -323,8 +323,7 @@ public void testDeleteCurrentFileDirectory() throws Exception {
// check the snapshot copy of subsub, which is contained in the subtree of
// sub's snapshot copy
INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0);
assertEquals(INodeDirectoryWithSnapshot.class,
snapshotNode4Subsub.getClass());
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
// check the children of subsub
INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
@ -478,8 +477,8 @@ public void testDeleteEarliestSnapshot2() throws Exception {
DirectoryDiffList diffList = dirNode.getDiffs();
assertEquals(1, diffList.asList().size());
assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName());
diffList = ((INodeDirectoryWithSnapshot) fsdir.getINode(
metaChangeDir.toString())).getDiffs();
diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory()
.getDiffs();
assertEquals(0, diffList.asList().size());
// check 2. noChangeDir and noChangeFile are still there

View File

@ -37,7 +37,7 @@
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;

View File

@ -237,6 +237,9 @@ Release 2.4.0 - UNRELEASED
MAPREDUCE-5656. bzip2 codec can drop records when reading data in splits
(jlowe)
MAPREDUCE-5623. TestJobCleanup fails because of RejectedExecutionException
and NPE. (jlowe)
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -195,8 +195,7 @@ private void testFailedJob(String fileName,
RunningJob job = jobClient.submitJob(jc);
JobID id = job.getID();
job.waitForCompletion();
Counters counters = job.getCounters();
assertTrue("No. of failed maps should be 1",counters.getCounter(JobCounter.NUM_FAILED_MAPS) == 1);
assertEquals("Job did not fail", JobStatus.FAILED, job.getJobState());
if (fileName != null) {
Path testFile = new Path(outDir, fileName);
@ -242,9 +241,7 @@ private void testKilledJob(String fileName,
job.killJob(); // kill the job
job.waitForCompletion(); // wait for the job to complete
counters = job.getCounters();
assertTrue("No. of killed maps should be 1", counters.getCounter(JobCounter.NUM_KILLED_MAPS) == 1);
assertEquals("Job was not killed", JobStatus.KILLED, job.getJobState());
if (fileName != null) {
Path testFile = new Path(outDir, fileName);

View File

@ -168,6 +168,13 @@ Release 2.4.0 - UNRELEASED
YARN-1311. Fixed app specific scheduler-events' names to be app-attempt
based. (vinodkv via jianhe)
YARN-1485. Modified RM HA configuration validation to also ensure that
service-address configuration are configured for every RM. (Xuan Gong via
vinodkv)
YARN-1435. Modified Distributed Shell to accept either the command or the
custom script. (Xuan Gong via zjshen)
OPTIMIZATIONS
BUG FIXES
@ -239,6 +246,12 @@ Release 2.4.0 - UNRELEASED
YARN-1405. Fixed ResourceManager to not hang when init/start fails with an
exception w.r.t state-store. (Jian He via vinodkv)
YARN-1505. Fixed Webapplication proxy server to not hardcode its bind
address. (Xuan Gong via vinodkv)
YARN-1145. Fixed a potential file-handle leak in the web interface for
displaying aggregated logs. (Rohith Sharma via vinodkv)
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -58,13 +58,17 @@ public static boolean isHAEnabled(Configuration conf) {
*/
public static void verifyAndSetConfiguration(Configuration conf)
throws YarnRuntimeException {
verifyAndSetRMHAIds(conf);
verifyAndSetRMHAId(conf);
verifyAndSetRMHAIdsList(conf);
verifyAndSetCurrentRMHAId(conf);
verifyAndSetAllServiceAddresses(conf);
}
private static void verifyAndSetRMHAIds(Configuration conf) {
/**
* Verify that the configuration specifies at least two RM-ids and an RPC
* address for each RM-id, then set the RM-ids.
*/
private static void verifyAndSetRMHAIdsList(Configuration conf) {
Collection<String> ids =
conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
if (ids.size() < 2) {
@ -76,6 +80,24 @@ private static void verifyAndSetRMHAIds(Configuration conf) {
StringBuilder setValue = new StringBuilder();
for (String id: ids) {
// verify the RM service address configuration for every RM-id
for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
String confKey = null;
try {
confKey = addSuffix(prefix, id);
if (conf.getTrimmed(confKey) == null) {
throwBadConfigurationException(getNeedToSetValueMessage(confKey));
}
} catch (IllegalArgumentException iae) {
String errmsg = iae.getMessage();
if (confKey == null) {
// Error at addSuffix
errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
getRMHAId(conf));
}
throwBadConfigurationException(errmsg);
}
}
setValue.append(id);
setValue.append(",");
}
@ -83,7 +105,7 @@ private static void verifyAndSetRMHAIds(Configuration conf) {
setValue.substring(0, setValue.length() - 1));
}
private static void verifyAndSetRMHAId(Configuration conf) {
private static void verifyAndSetCurrentRMHAId(Configuration conf) {
String rmId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
if (rmId == null) {
throwBadConfigurationException(
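The YARN-1485 validation shown in this file reduces to a nested loop: for every configured RM-id, every service-address key must be present. A self-contained sketch of that logic, assuming a plain Map in place of Configuration and a one-entry subset of RM_SERVICES_ADDRESS_CONF_KEYS:

import java.util.Map;

class HAConfigCheckSketch {
  // Illustrative subset; the real list is
  // YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.
  static final String[] ADDRESS_KEYS = { "yarn.resourcemanager.address" };

  static String addSuffix(String key, String id) { return key + "." + id; }

  static void verify(Map<String, String> conf) {
    String ids = conf.get("yarn.resourcemanager.ha.rm-ids");
    if (ids == null || ids.split(",").length < 2) {
      throw new IllegalArgumentException("at least two RM-ids must be configured");
    }
    for (String id : ids.split(",")) {
      for (String prefix : ADDRESS_KEYS) {
        // Every RM-id must have every service address configured.
        String confKey = addSuffix(prefix, id.trim());
        if (conf.get(confKey) == null) {
          throw new IllegalArgumentException("missing required address: " + confKey);
        }
      }
    }
  }
}

Failing fast here, at verifyAndSetConfiguration() time, turns a half-configured HA pair into a startup error instead of a runtime surprise.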

View File

@ -218,13 +218,14 @@ public class ApplicationMaster {
private long shellScriptPathLen = 0;
// Hardcoded path to shell script in launch container's local env
private final String ExecShellStringPath = "ExecShellScript.sh";
private static final String ExecShellStringPath = "ExecShellScript.sh";
private static final String ExecBatScripStringtPath = "ExecBatScript.bat";
// Hardcoded path to custom log_properties
private final String log4jPath = "log4j.properties";
private static final String log4jPath = "log4j.properties";
private final String shellCommandPath = "shellCommands";
private final String shellArgsPath = "shellArgs";
private static final String shellCommandPath = "shellCommands";
private static final String shellArgsPath = "shellArgs";
private volatile boolean done;
private volatile boolean success;
@ -234,6 +235,9 @@ public class ApplicationMaster {
// Launch threads
private List<Thread> launchThreads = new ArrayList<Thread>();
private final String linux_bash_command = "bash";
private final String windows_command = "cmd /c";
/**
* @param args Command line args
*/
@ -308,8 +312,6 @@ public boolean init(String[] args) throws ParseException, IOException {
Options opts = new Options();
opts.addOption("app_attempt_id", true,
"App Attempt ID. Not to be used unless for testing purposes");
opts.addOption("shell_script", true,
"Location of the shell script to be executed");
opts.addOption("shell_env", true,
"Environment for shell script. Specified as env_key=env_val pairs");
opts.addOption("container_memory", true,
@ -387,11 +389,15 @@ public boolean init(String[] args) throws ParseException, IOException {
+ appAttemptID.getApplicationId().getClusterTimestamp()
+ ", attemptId=" + appAttemptID.getAttemptId());
if (!fileExist(shellCommandPath)) {
if (!fileExist(shellCommandPath)
&& envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION).isEmpty()) {
throw new IllegalArgumentException(
"No shell command specified to be executed by application master");
"No shell command or shell script specified to be executed by application master");
}
if (fileExist(shellCommandPath)) {
shellCommand = readContent(shellCommandPath);
}
shellCommand = readContent(shellCommandPath);
if (fileExist(shellArgsPath)) {
shellArgs = readContent(shellArgsPath);
@ -847,7 +853,9 @@ public void run() {
}
shellRsrc.setTimestamp(shellScriptPathTimestamp);
shellRsrc.setSize(shellScriptPathLen);
localResources.put(ExecShellStringPath, shellRsrc);
localResources.put(Shell.WINDOWS ? ExecBatScripStringtPath :
ExecShellStringPath, shellRsrc);
shellCommand = Shell.WINDOWS ? windows_command : linux_bash_command;
}
ctx.setLocalResources(localResources);
@ -858,7 +866,8 @@ public void run() {
vargs.add(shellCommand);
// Set shell script path
if (!shellScriptPath.isEmpty()) {
vargs.add(ExecShellStringPath);
vargs.add(Shell.WINDOWS ? ExecBatScripStringtPath
: ExecShellStringPath);
}
// Set args for the shell command if any
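The Windows support added to ApplicationMaster is a select-by-platform idiom keyed off Shell.WINDOWS. A hedged equivalent using a plain os.name probe instead of Hadoop's Shell class:

import java.util.Locale;

class PlatformShellSketch {
  static final boolean WINDOWS =
      System.getProperty("os.name").toLowerCase(Locale.ROOT).startsWith("windows");

  // .bat scripts are localized under a different resource name than .sh ones.
  static String scriptResourceName() {
    return WINDOWS ? "ExecBatScript.bat" : "ExecShellScript.sh";
  }

  // "cmd /c" runs the bat file and exits; "bash" executes the shell script.
  static String shellInterpreter() {
    return WINDOWS ? "cmd /c" : "bash";
  }
}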

View File

@ -49,6 +49,7 @@
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@ -167,11 +168,14 @@ public class Client {
// Command line options
private Options opts;
private final String shellCommandPath = "shellCommands";
private final String shellArgsPath = "shellArgs";
private final String appMasterJarPath = "AppMaster.jar";
private static final String shellCommandPath = "shellCommands";
private static final String shellArgsPath = "shellArgs";
private static final String appMasterJarPath = "AppMaster.jar";
// Hardcoded path to custom log_properties
private final String log4jPath = "log4j.properties";
private static final String log4jPath = "log4j.properties";
private static final String linuxShellPath = "ExecShellScript.sh";
private static final String windowBatPath = "ExecBatScript.bat";
/**
* @param args Command line arguments
@ -225,8 +229,11 @@ public Client(Configuration conf) throws Exception {
opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
opts.addOption("master_vcores", true, "Amount of virtual cores to be requested to run the application master");
opts.addOption("jar", true, "Jar file containing the application master");
opts.addOption("shell_command", true, "Shell command to be executed by the Application Master");
opts.addOption("shell_script", true, "Location of the shell script to be executed");
opts.addOption("shell_command", true, "Shell command to be executed by " +
"the Application Master. Can only specify either --shell_command " +
"or --shell_script");
opts.addOption("shell_script", true, "Location of the shell script to be " +
"executed. Can only specify either --shell_command or --shell_script");
opts.addOption("shell_args", true, "Command line args for the shell script." +
"Multiple args can be separated by empty space.");
opts.getOption("shell_args").setArgs(Option.UNLIMITED_VALUES);
@ -308,12 +315,15 @@ public boolean init(String[] args) throws ParseException {
appMasterJar = cliParser.getOptionValue("jar");
if (!cliParser.hasOption("shell_command")) {
throw new IllegalArgumentException("No shell command specified to be executed by application master");
}
shellCommand = cliParser.getOptionValue("shell_command");
if (cliParser.hasOption("shell_script")) {
if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) {
throw new IllegalArgumentException(
"No shell command or shell script specified to be executed by application master");
} else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) {
throw new IllegalArgumentException("Can not specify shell_command option " +
"and shell_script option at the same time");
} else if (cliParser.hasOption("shell_command")) {
shellCommand = cliParser.getOptionValue("shell_command");
} else {
shellScriptPath = cliParser.getOptionValue("shell_script");
}
if (cliParser.hasOption("shell_args")) {
@ -466,8 +476,11 @@ public boolean run() throws IOException, YarnException {
long hdfsShellScriptTimestamp = 0;
if (!shellScriptPath.isEmpty()) {
Path shellSrc = new Path(shellScriptPath);
String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh";
Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
String shellPathSuffix =
appName + "/" + appId.getId() + "/"
+ (Shell.WINDOWS ? windowBatPath : linuxShellPath);
Path shellDst =
new Path(fs.getHomeDirectory(), shellPathSuffix);
fs.copyFromLocalFile(false, true, shellSrc, shellDst);
hdfsShellScriptLocation = shellDst.toUri().toString();
FileStatus shellFileStatus = fs.getFileStatus(shellDst);

View File

@ -303,6 +303,54 @@ public void testDSShellWithMultipleArgs() throws Exception {
verifyContainerLog(4, expectedContent, false, "");
}
@Test(timeout=90000)
public void testDSShellWithShellScript() throws Exception {
final File basedir =
new File("target", TestDistributedShell.class.getName());
final File tmpDir = new File(basedir, "tmpDir");
tmpDir.mkdirs();
final File customShellScript = new File(tmpDir, "custom_script.sh");
if (customShellScript.exists()) {
customShellScript.delete();
}
if (!customShellScript.createNewFile()) {
Assert.fail("Can not create custom shell script file.");
}
PrintWriter fileWriter = new PrintWriter(customShellScript);
// set the output to DEBUG level
fileWriter.write("echo testDSShellWithShellScript");
fileWriter.close();
System.out.println(customShellScript.getAbsolutePath());
String[] args = {
"--jar",
APPMASTER_JAR,
"--num_containers",
"1",
"--shell_script",
customShellScript.getAbsolutePath(),
"--master_memory",
"512",
"--master_vcores",
"2",
"--container_memory",
"128",
"--container_vcores",
"1"
};
LOG.info("Initializing DS Client");
final Client client =
new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess = client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
boolean result = client.run();
LOG.info("Client run completed. Result=" + result);
List<String> expectedContent = new ArrayList<String>();
expectedContent.add("testDSShellWithShellScript");
verifyContainerLog(1, expectedContent, false, "");
}
@Test(timeout=90000)
public void testDSShellWithInvalidArgs() throws Exception {
Client client = new Client(new Configuration(yarnCluster.getConfig()));
@ -399,6 +447,58 @@ public void testDSShellWithInvalidArgs() throws Exception {
Assert.assertTrue("The throw exception is not expected",
e.getMessage().contains("Invalid virtual cores specified"));
}
LOG.info("Initializing DS Client with --shell_command and --shell_script");
try {
String[] args = {
"--jar",
APPMASTER_JAR,
"--num_containers",
"2",
"--shell_command",
Shell.WINDOWS ? "dir" : "ls",
"--master_memory",
"512",
"--master_vcores",
"2",
"--container_memory",
"128",
"--container_vcores",
"1",
"--shell_script",
"test.sh"
};
client.init(args);
Assert.fail("Exception is expected");
} catch (IllegalArgumentException e) {
Assert.assertTrue("The throw exception is not expected",
e.getMessage().contains("Can not specify shell_command option " +
"and shell_script option at the same time"));
}
LOG.info("Initializing DS Client without --shell_command and --shell_script");
try {
String[] args = {
"--jar",
APPMASTER_JAR,
"--num_containers",
"2",
"--master_memory",
"512",
"--master_vcores",
"2",
"--container_memory",
"128",
"--container_vcores",
"1"
};
client.init(args);
Assert.fail("Exception is expected");
} catch (IllegalArgumentException e) {
Assert.assertTrue("The throw exception is not expected",
e.getMessage().contains("No shell command or shell script specified " +
"to be executed by application master"));
}
}
protected static void waitForNMToRegister(NodeManager nm)
@ -490,10 +590,10 @@ private int verifyContainerLog(int containerNum,
for (File output : containerFiles[i].listFiles()) {
if (output.getName().trim().contains("stdout")) {
BufferedReader br = null;
List<String> stdOutContent = new ArrayList<String>();
try {
String sCurrentLine;
br = new BufferedReader(new FileReader(output));
int numOfline = 0;
while ((sCurrentLine = br.readLine()) != null) {
@ -502,12 +602,25 @@ private int verifyContainerLog(int containerNum,
numOfWords++;
}
} else if (output.getName().trim().equals("stdout")){
Assert.assertEquals("The current is" + sCurrentLine,
expectedContent.get(numOfline), sCurrentLine.trim());
numOfline++;
if (! Shell.WINDOWS) {
Assert.assertEquals("The current is" + sCurrentLine,
expectedContent.get(numOfline), sCurrentLine.trim());
numOfline++;
} else {
stdOutContent.add(sCurrentLine.trim());
}
}
}
/* By executing the bat script with cmd /c,
* all of its output is flushed at once,
* so it is hard to check the output line by line.
* Simply check whether the output from the bat file contains
* all the expected messages.
*/
if (Shell.WINDOWS && !count
&& output.getName().trim().equals("stdout")) {
Assert.assertTrue(stdOutContent.containsAll(expectedContent));
}
} catch (IOException e) {
e.printStackTrace();
} finally {
@ -523,6 +636,5 @@ private int verifyContainerLog(int containerNum,
}
return numOfWords;
}
}
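The Windows branch added to verifyContainerLog gives up on ordered, line-by-line comparison and collects stdout for a containment check instead. The decision, reduced to a sketch:

import java.util.List;

class LogCheckSketch {
  // On Linux, stdout is compared in order, line by line; on Windows, where
  // cmd /c flushes everything at once, fall back to containment.
  static boolean verify(List<String> actual, List<String> expected,
      boolean windows) {
    return windows ? actual.containsAll(expected) : actual.equals(expected);
  }
}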

View File

@ -53,6 +53,7 @@
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.file.tfile.TFile;
@ -294,7 +295,7 @@ public void append(LogKey logKey, LogValue logValue) throws IOException {
out.close();
}
public void closeWriter() {
public void close() {
try {
this.writer.close();
} catch (IOException e) {
@ -569,9 +570,8 @@ public static void readAContainerLogsForALogType(
out.println("");
}
public void close() throws IOException {
this.scanner.close();
this.fsDataIStream.close();
public void close() {
IOUtils.cleanup(LOG, scanner, reader, fsDataIStream);
}
}
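The rewritten close() above delegates to Hadoop's IOUtils.cleanup, which closes each Closeable in turn and logs rather than propagates IOExceptions, so one failing close() cannot leak the remaining streams. A hedged equivalent without the Hadoop dependency:

import java.io.Closeable;
import java.io.IOException;

final class CleanupSketch {
  // Close every resource, logging failures instead of throwing, so an
  // exception from one close() cannot prevent the others from running.
  static void cleanup(Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        System.err.println("Exception while closing " + c + ": " + e);
      }
    }
  }
}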

View File

@ -59,109 +59,113 @@ public class AggregatedLogsBlock extends HtmlBlock {
@Override
protected void render(Block html) {
ContainerId containerId = verifyAndGetContainerId(html);
NodeId nodeId = verifyAndGetNodeId(html);
String appOwner = verifyAndGetAppOwner(html);
LogLimits logLimits = verifyAndGetLogLimits(html);
if (containerId == null || nodeId == null || appOwner == null
|| appOwner.isEmpty() || logLimits == null) {
return;
}
ApplicationId applicationId =
containerId.getApplicationAttemptId().getApplicationId();
String logEntity = $(ENTITY_STRING);
if (logEntity == null || logEntity.isEmpty()) {
logEntity = containerId.toString();
}
if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
html.h1()
._("Aggregation is not enabled. Try the nodemanager at " + nodeId)
._();
return;
}
Path remoteRootLogDir =
new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
AggregatedLogFormat.LogReader reader = null;
try {
reader =
new AggregatedLogFormat.LogReader(conf,
LogAggregationUtils.getRemoteNodeLogFileForApp(
remoteRootLogDir, applicationId, appOwner, nodeId,
LogAggregationUtils.getRemoteNodeLogDirSuffix(conf)));
} catch (FileNotFoundException e) {
// ACLs not available till the log file is opened.
html.h1()
._("Logs not available for "
+ logEntity
+ ". Aggregation may not be complete, "
+ "Check back later or try the nodemanager at "
+ nodeId)._();
return;
} catch (IOException e) {
html.h1()._("Error getting logs for " + logEntity)._();
LOG.error("Error getting logs for " + logEntity, e);
return;
}
String owner = null;
Map<ApplicationAccessType, String> appAcls = null;
try {
owner = reader.getApplicationOwner();
appAcls = reader.getApplicationAcls();
} catch (IOException e) {
html.h1()._("Error getting logs for " + logEntity)._();
LOG.error("Error getting logs for " + logEntity, e);
return;
}
ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
aclsManager.addApplication(applicationId, appAcls);
String remoteUser = request().getRemoteUser();
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
if (callerUGI != null
&& !aclsManager.checkAccess(callerUGI, ApplicationAccessType.VIEW_APP,
owner, applicationId)) {
html.h1()
._("User [" + remoteUser
+ "] is not authorized to view the logs for " + logEntity)._();
return;
}
String desiredLogType = $(CONTAINER_LOG_TYPE);
try {
AggregatedLogFormat.ContainerLogsReader logReader =
reader.getContainerLogsReader(containerId);
if (logReader == null) {
html.h1()._(
"Logs not available for " + logEntity
+ ". Could be caused by the rentention policy")._();
ContainerId containerId = verifyAndGetContainerId(html);
NodeId nodeId = verifyAndGetNodeId(html);
String appOwner = verifyAndGetAppOwner(html);
LogLimits logLimits = verifyAndGetLogLimits(html);
if (containerId == null || nodeId == null || appOwner == null
|| appOwner.isEmpty() || logLimits == null) {
return;
}
boolean foundLog = readContainerLogs(html, logReader, logLimits,
desiredLogType);
ApplicationId applicationId = containerId.getApplicationAttemptId()
.getApplicationId();
String logEntity = $(ENTITY_STRING);
if (logEntity == null || logEntity.isEmpty()) {
logEntity = containerId.toString();
}
if (!foundLog) {
if (desiredLogType.isEmpty()) {
html.h1("No logs available for container " + containerId.toString());
} else {
html.h1("Unable to locate '" + desiredLogType
+ "' log for container " + containerId.toString());
if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
html.h1()
._("Aggregation is not enabled. Try the nodemanager at " + nodeId)
._();
return;
}
Path remoteRootLogDir = new Path(conf.get(
YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
try {
reader = new AggregatedLogFormat.LogReader(conf,
LogAggregationUtils.getRemoteNodeLogFileForApp(remoteRootLogDir,
applicationId, appOwner, nodeId,
LogAggregationUtils.getRemoteNodeLogDirSuffix(conf)));
} catch (FileNotFoundException e) {
// ACLs not available till the log file is opened.
html.h1()
._("Logs not available for " + logEntity
+ ". Aggregation may not be complete, "
+ "Check back later or try the nodemanager at " + nodeId)._();
return;
} catch (IOException e) {
html.h1()._("Error getting logs for " + logEntity)._();
LOG.error("Error getting logs for " + logEntity, e);
return;
}
String owner = null;
Map<ApplicationAccessType, String> appAcls = null;
try {
owner = reader.getApplicationOwner();
appAcls = reader.getApplicationAcls();
} catch (IOException e) {
html.h1()._("Error getting logs for " + logEntity)._();
LOG.error("Error getting logs for " + logEntity, e);
return;
}
ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
aclsManager.addApplication(applicationId, appAcls);
String remoteUser = request().getRemoteUser();
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
if (callerUGI != null
&& !aclsManager.checkAccess(callerUGI,
ApplicationAccessType.VIEW_APP, owner, applicationId)) {
html.h1()
._("User [" + remoteUser
+ "] is not authorized to view the logs for " + logEntity)._();
return;
}
String desiredLogType = $(CONTAINER_LOG_TYPE);
try {
AggregatedLogFormat.ContainerLogsReader logReader = reader
.getContainerLogsReader(containerId);
if (logReader == null) {
html.h1()
._("Logs not available for " + logEntity
+ ". Could be caused by the rentention policy")._();
return;
}
boolean foundLog = readContainerLogs(html, logReader, logLimits,
desiredLogType);
if (!foundLog) {
if (desiredLogType.isEmpty()) {
html.h1("No logs available for container " + containerId.toString());
} else {
html.h1("Unable to locate '" + desiredLogType
+ "' log for container " + containerId.toString());
}
return;
}
} catch (IOException e) {
html.h1()._("Error getting logs for " + logEntity)._();
LOG.error("Error getting logs for " + logEntity, e);
return;
}
} catch (IOException e) {
html.h1()._("Error getting logs for " + logEntity)._();
LOG.error("Error getting logs for " + logEntity, e);
return;
} finally {
if (reader != null) {
reader.close();
}
}
}
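The wholesale re-indentation of render() exists for one reason: every early return now passes through a finally block that closes the LogReader, which is the YARN-1145 file-handle leak fix. The shape of the fix in isolation (openReader()/renderLogs() are illustrative stand-ins for the real calls):

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

class LeakFixSketch {
  void render() throws IOException {
    Reader reader = null;
    try {
      reader = openReader();
      renderLogs(reader);   // may return early or throw
    } finally {
      if (reader != null) {
        reader.close();     // runs on every path, releasing the file handle
      }
    }
  }

  Reader openReader() throws IOException { return new StringReader("logs"); }
  void renderLogs(Reader r) throws IOException { /* early returns elided */ }
}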

View File

@ -39,6 +39,7 @@ public class TestHAUtil {
private static final String RM1_ADDRESS_UNTRIMMED = " \t\t\n 1.2.3.4:8021 \n\t ";
private static final String RM1_ADDRESS = RM1_ADDRESS_UNTRIMMED.trim();
private static final String RM2_ADDRESS = "localhost:8022";
private static final String RM3_ADDRESS = "localhost:8033";
private static final String RM1_NODE_ID_UNTRIMMED = "rm1 ";
private static final String RM1_NODE_ID = RM1_NODE_ID_UNTRIMMED.trim();
private static final String RM2_NODE_ID = "rm2";
@ -113,8 +114,13 @@ public void testVerifyAndSetConfiguration() throws Exception {
}
conf.clear();
conf.set(YarnConfiguration.RM_HA_IDS, RM_INVALID_NODE_ID + ","
+ RM1_NODE_ID);
// simulate the case where YarnConfiguration.RM_HA_ID is not set
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
+ RM2_NODE_ID);
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
}
try {
HAUtil.verifyAndSetConfiguration(conf);
} catch (YarnRuntimeException e) {
@ -165,6 +171,7 @@ public void testVerifyAndSetConfiguration() throws Exception {
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
conf.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
}
try {
HAUtil.verifyAndSetConfiguration(conf);

View File

@ -114,7 +114,7 @@ public void testReadAcontainerLogs1() throws Exception {
testContainerId, ugi.getShortUserName());
logWriter.append(logKey, logValue);
logWriter.closeWriter();
logWriter.close();
// make sure permission are correct on the file
FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
@ -194,7 +194,7 @@ public void testContainerLogsFileAccess() throws IOException {
ugi.getShortUserName());
logWriter.append(logKey, logValue);
logWriter.closeWriter();
logWriter.close();
BufferedReader in =
new BufferedReader(new FileReader(new File(remoteAppLogFile

View File

@ -229,7 +229,7 @@ private void writeLog(Configuration configuration, String user)
writer.append(new AggregatedLogFormat.LogKey("container_0_0001_01_000001"),
new AggregatedLogFormat.LogValue(rootLogDirs, containerId,UserGroupInformation.getCurrentUser().getShortUserName()));
writer.closeWriter();
writer.close();
}
private void writeLogs(String dirName) throws Exception {

View File

@ -178,7 +178,7 @@ private void doAppLogAggregation() {
localAppLogDirs);
if (this.writer != null) {
this.writer.closeWriter();
this.writer.close();
LOG.info("Finished aggregate log-file for app " + this.applicationId);
}

View File

@ -143,7 +143,9 @@ private Configuration createHARMConf(
conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS, hostPort);
conf.set(YarnConfiguration.RM_HA_ID, rmId);
for (String rpcAddress : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
conf.set(HAUtil.addSuffix(rpcAddress, rmId), "localhost:0");
for (String id : HAUtil.getRMHAIds(conf)) {
conf.set(HAUtil.addSuffix(rpcAddress, id), "localhost:0");
}
}
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_ADMIN_ADDRESS, rmId),
"localhost:" + adminPort);

View File

@ -296,7 +296,9 @@ private void setHARMConfiguration(Configuration conf) {
String hostname = MiniYARNCluster.getHostname();
conf.set(YarnConfiguration.RM_HA_ID, rmId);
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
conf.set(HAUtil.addSuffix(confKey, rmId), hostname + ":0");
for (String id : HAUtil.getRMHAIds(conf)) {
conf.set(HAUtil.addSuffix(confKey, id), hostname + ":0");
}
}
}

View File

@ -33,6 +33,8 @@
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import com.google.common.annotations.VisibleForTesting;
public class WebAppProxy extends AbstractService {
public static final String FETCHER_ATTRIBUTE= "AppUrlFetcher";
public static final String IS_SECURITY_ENABLED_ATTRIBUTE = "IsSecurityEnabled";
@ -126,4 +128,9 @@ public void join() {
}
}
}
@VisibleForTesting
String getBindAddress() {
return bindAddress + ":" + port;
}
}
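Exposing the bound address through a package-private @VisibleForTesting accessor is what lets TestWebAppProxyServer (further down) assert that the configured PROXY_ADDRESS actually took effect. The idiom in isolation, with illustrative field values:

import com.google.common.annotations.VisibleForTesting;

class BindAddressSketch {
  private final String bindAddress = "0.0.0.0";
  private final int port = 8888;

  // Package-private on purpose: tests in the same package may read it,
  // but it is not part of the class's public API.
  @VisibleForTesting
  String getBindAddress() {
    return bindAddress + ":" + port;
  }
}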

View File

@ -77,7 +77,8 @@ public static void main(String[] args) {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
try {
WebAppProxyServer proxyServer = startServer();
YarnConfiguration configuration = new YarnConfiguration();
WebAppProxyServer proxyServer = startServer(configuration);
proxyServer.proxy.join();
} catch (Throwable t) {
LOG.fatal("Error starting Proxy server", t);
@ -90,12 +91,11 @@ public static void main(String[] args) {
*
* @return proxy server instance.
*/
protected static WebAppProxyServer startServer() throws Exception {
protected static WebAppProxyServer startServer(Configuration configuration)
throws Exception {
WebAppProxyServer proxy = new WebAppProxyServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration configuration = new YarnConfiguration();
configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9099");
proxy.init(configuration);
proxy.start();
return proxy;
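Threading a caller-supplied Configuration into startServer(), instead of constructing one inside it, is the YARN-1505 fix: main() passes a stock YarnConfiguration, while tests pass one with PROXY_ADDRESS preset, so no bind address is hardcoded. A minimal sketch of the inversion, assuming Properties stands in for Configuration and that PROXY_ADDRESS resolves to yarn.web-proxy.address:

import java.util.Properties;

class ProxyServerSketch {
  private String boundAddress;

  // The caller supplies the configuration, so main() and tests can differ.
  static ProxyServerSketch startServer(Properties configuration) {
    ProxyServerSketch proxy = new ProxyServerSketch();
    proxy.boundAddress =
        configuration.getProperty("yarn.web-proxy.address", "0.0.0.0:8888");
    return proxy;
  }
}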

View File

@ -20,6 +20,7 @@
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
@ -29,11 +30,12 @@
public class TestWebAppProxyServer {
private WebAppProxyServer webAppProxy = null;
private final String proxyAddress = "0.0.0.0:8888";
@Before
public void setUp() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.PROXY_ADDRESS, "0.0.0.0:8888");
conf.set(YarnConfiguration.PROXY_ADDRESS, proxyAddress);
webAppProxy = new WebAppProxyServer();
webAppProxy.init(conf);
}
@ -47,6 +49,11 @@ public void tearDown() throws Exception {
public void testStart() {
assertEquals(STATE.INITED, webAppProxy.getServiceState());
webAppProxy.start();
for (Service service : webAppProxy.getServices()) {
if (service instanceof WebAppProxy) {
assertEquals(((WebAppProxy) service).getBindAddress(), proxyAddress);
}
}
assertEquals(STATE.STARTED, webAppProxy.getServiceState());
}
}

View File

@ -184,8 +184,10 @@ public void testWebAppProxyServlet() throws Exception {
@Test(timeout=5000)
public void testWebAppProxyServerMainMethod() throws Exception {
WebAppProxyServer mainServer = null;
Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9099");
try {
mainServer = WebAppProxyServer.startServer();
mainServer = WebAppProxyServer.startServer(conf);
int counter = 20;
URL wrongUrl = new URL("http://localhost:9099/proxy/app");