commit 740605cde6

Merge trunk to HDFS-4685.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1551332 13f79535-47bb-0310-9956-ffa450edef68
CHANGES.txt
@@ -249,9 +249,16 @@ Trunk (Unreleased)
     HDFS-5647. Merge INodeDirectory.Feature and INodeFile.Feature. (Haohui Mai
     via jing9)
 
+    HDFS-5632. Flatten INodeDirectory hierarchy: Replace
+    INodeDirectoryWithSnapshot with DirectoryWithSnapshotFeature.
+    (jing9 via szetszwo)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 
+    HDFS-5665. Remove the unnecessary writeLock while initializing CacheManager
+    in FsNameSystem Ctor. (Uma Maheswara Rao G via Andrew Wang)
+
   BUG FIXES
     HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar
     via cmccabe)
@@ -447,6 +454,12 @@ Trunk (Unreleased)
 
     HDFS-5626. dfsadmin -report shows incorrect cache values. (cmccabe)
 
+    HDFS-5406. Send incremental block reports for all storages in a
+    single call. (Arpit Agarwal)
+
+    HDFS-5454. DataNode UUID should be assigned prior to FsDataset
+    initialization. (Arpit Agarwal)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report
@@ -810,6 +823,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
     jing9)
 
+    HDFS-5592. statechangeLog of completeFile should be logged only in case of success.
+    (Vinayakumar via umamahesh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -953,6 +969,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
 
+    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
BPOfferService.java
@@ -148,7 +148,7 @@ class BPOfferService {
     return false;
   }
 
-  String getBlockPoolId() {
+  synchronized String getBlockPoolId() {
     if (bpNSInfo != null) {
       return bpNSInfo.getBlockPoolID();
     } else {
@@ -163,7 +163,7 @@ class BPOfferService {
   }
 
   @Override
-  public String toString() {
+  public synchronized String toString() {
     if (bpNSInfo == null) {
       // If we haven't yet connected to our NN, we don't yet know our
       // own block pool ID.
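These two hunks are the BPOfferService side of HDFS-5666 (listed in CHANGES.txt above): bpNSInfo is updated under the object monitor elsewhere in the class, so readers that take no lock have no visibility guarantee. A minimal, self-contained sketch of the hazard being fixed; PoolIdHolder is a hypothetical stand-in, not HDFS code:

// Hypothetical stand-in: reader and writer must hold the same monitor,
// otherwise the reader may see a stale (null) value indefinitely.
class PoolIdHolder {
  private String poolId; // written by one thread, read by others

  synchronized void setPoolId(String id) {
    this.poolId = id;
  }

  // Like the patched getBlockPoolId()/toString(): synchronized, so this read
  // is guaranteed to observe the most recent setPoolId() call.
  synchronized String getPoolId() {
    return poolId;
  }
}

Declaring both accessors synchronized makes the write happen-before every later read on the same monitor.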
BPServiceActor.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.util.Time.now;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -273,7 +274,8 @@ class BPServiceActor implements Runnable {
   private void reportReceivedDeletedBlocks() throws IOException {
 
     // Generate a list of the pending reports for each storage under the lock
-    Map<String, ReceivedDeletedBlockInfo[]> blockArrays = Maps.newHashMap();
+    ArrayList<StorageReceivedDeletedBlocks> reports =
+        new ArrayList<StorageReceivedDeletedBlocks>(pendingIncrementalBRperStorage.size());
     synchronized (pendingIncrementalBRperStorage) {
       for (Map.Entry<String, PerStoragePendingIncrementalBR> entry :
            pendingIncrementalBRperStorage.entrySet()) {
@@ -286,33 +288,34 @@ class BPServiceActor implements Runnable {
           pendingReceivedRequests =
               (pendingReceivedRequests > rdbi.length ?
                   (pendingReceivedRequests - rdbi.length) : 0);
-          blockArrays.put(storageUuid, rdbi);
+          reports.add(new StorageReceivedDeletedBlocks(storageUuid, rdbi));
         }
       }
     }
 
-    // Send incremental block reports to the Namenode outside the lock
-    for (Map.Entry<String, ReceivedDeletedBlockInfo[]> entry :
-           blockArrays.entrySet()) {
-      final String storageUuid = entry.getKey();
-      final ReceivedDeletedBlockInfo[] rdbi = entry.getValue();
-
-      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-          storageUuid, rdbi) };
-      boolean success = false;
-      try {
-        bpNamenode.blockReceivedAndDeleted(bpRegistration,
-            bpos.getBlockPoolId(), report);
+    if (reports.size() == 0) {
+      // Nothing new to report.
+      return;
+    }
+
+    // Send incremental block reports to the Namenode outside the lock
+    boolean success = false;
+    try {
+      bpNamenode.blockReceivedAndDeleted(bpRegistration,
+          bpos.getBlockPoolId(),
+          reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]));
       success = true;
     } finally {
       if (!success) {
         synchronized (pendingIncrementalBRperStorage) {
+          for (StorageReceivedDeletedBlocks report : reports) {
             // If we didn't succeed in sending the report, put all of the
             // blocks back onto our queue, but only in the case where we
             // didn't put something newer in the meantime.
             PerStoragePendingIncrementalBR perStorageMap =
-                pendingIncrementalBRperStorage.get(storageUuid);
-            pendingReceivedRequests += perStorageMap.putMissingBlockInfos(rdbi);
+                pendingIncrementalBRperStorage.get(report.getStorageID());
+            pendingReceivedRequests +=
+                perStorageMap.putMissingBlockInfos(report.getBlocks());
           }
         }
       }
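The rewritten reportReceivedDeletedBlocks() above is the heart of HDFS-5406: pending per-storage reports are collected while holding the lock, sent to the NameNode in one batched RPC with the lock released, and re-queued if the call fails. A self-contained sketch of that collect/send/requeue shape under simplified assumptions; BatchedReporter, its string items, and sendBatch are hypothetical stand-ins for the HDFS types:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;

class BatchedReporter {
  private final Map<String, Queue<String>> pending = new HashMap<>();

  void add(String storageId, String item) {
    synchronized (pending) {
      pending.computeIfAbsent(storageId, k -> new ArrayDeque<>()).add(item);
    }
  }

  void reportOnce() {
    // 1. Drain every storage's queue into a batch while holding the lock.
    List<Map.Entry<String, List<String>>> reports = new ArrayList<>();
    synchronized (pending) {
      for (Map.Entry<String, Queue<String>> e : pending.entrySet()) {
        if (!e.getValue().isEmpty()) {
          reports.add(Map.entry(e.getKey(), new ArrayList<>(e.getValue())));
          e.getValue().clear();
        }
      }
    }
    if (reports.isEmpty()) {
      return; // nothing new to report
    }
    // 2. One batched call for all storages, made with the lock released.
    boolean success = false;
    try {
      sendBatch(reports);
      success = true;
    } finally {
      if (!success) {
        // 3. Put everything back so the next cycle retries it.
        synchronized (pending) {
          for (Map.Entry<String, List<String>> r : reports) {
            pending.get(r.getKey()).addAll(r.getValue());
          }
        }
      }
    }
  }

  void sendBatch(List<Map.Entry<String, List<String>>> reports) {
    // stand-in for the single bpNamenode.blockReceivedAndDeleted(...) RPC
  }
}

Sending outside the synchronized block keeps a slow RPC from blocking threads that enqueue new block info, and the finally-based requeue preserves at-least-once delivery.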
DataNode.java
@@ -815,8 +815,6 @@ public class DataNode extends Configured
       storageInfo = new StorageInfo(nsInfo);
     }
 
-    checkDatanodeUuid();
-
     DatanodeID dnId = new DatanodeID(
         streamingAddr.getAddress().getHostAddress(), hostName,
         storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
@@ -965,6 +963,9 @@ public class DataNode extends Configured
           + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
     }
 
+    // If this is a newly formatted DataNode then assign a new DatanodeUuid.
+    checkDatanodeUuid();
+
     synchronized(this) {
       if (data == null) {
         data = factory.newInstance(this, storage, conf);
DirectoryWithQuotaFeature.java
@@ -154,6 +154,10 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     verifyDiskspaceQuota(dsDelta);
   }
 
+  boolean isQuotaSet() {
+    return nsQuota >= 0 || dsQuota >= 0;
+  }
+
   private String namespaceString() {
     return "namespace: " + (nsQuota < 0? "-": namespace + "/" + nsQuota);
   }
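The new isQuotaSet() relies on the convention, visible in namespaceString() above, that a negative quota value means the quota is not set. A tiny runnable illustration; QuotaExample is hypothetical, not HDFS code:

// Hypothetical illustration of the quota convention behind isQuotaSet().
class QuotaExample {
  long nsQuota = -1; // namespace quota (number of names), negative = unset
  long dsQuota = -1; // diskspace quota (bytes), negative = unset

  boolean isQuotaSet() {
    return nsQuota >= 0 || dsQuota >= 0;
  }

  public static void main(String[] args) {
    QuotaExample q = new QuotaExample();
    System.out.println(q.isQuotaSet()); // false: neither quota set
    q.dsQuota = 10L * 1024 * 1024;      // set a 10 MB diskspace quota
    System.out.println(q.isQuotaSet()); // true
  }
}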
FSDirectory.java
@@ -67,7 +67,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
 import org.apache.hadoop.hdfs.util.ByteArray;
@@ -622,8 +621,7 @@ public class FSDirectory implements Closeable {
     // snapshot is taken on the dst tree, changes will be recorded in the latest
     // snapshot of the src tree.
     if (isSrcInSnapshot) {
-      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot(),
-          inodeMap);
+      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
       srcIIP.setLastINode(srcChild);
     }
 
@@ -692,11 +690,9 @@ public class FSDirectory implements Closeable {
     }
     // update modification time of dst and the parent of src
     final INode srcParent = srcIIP.getINode(-2);
-    srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot(),
-        inodeMap);
+    srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
     dstParent = dstIIP.getINode(-2); // refresh dstParent
-    dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot(),
-        inodeMap);
+    dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
     // update moved leases with new filename
     getFSNamesystem().unprotectedChangeLease(src, dst);
 
@@ -734,11 +730,10 @@ public class FSDirectory implements Closeable {
     }
 
     if (isSrcInSnapshot) {
-      // srcParent must be an INodeDirectoryWithSnapshot instance since
-      // isSrcInSnapshot is true and src node has been removed from
-      // srcParent
-      ((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent(
-          oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot());
+      // srcParent must have snapshot feature since isSrcInSnapshot is true
+      // and src node has been removed from srcParent
+      srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
+          srcIIP.getLatestSnapshot());
     } else {
       // original srcChild is not in latest snapshot, we only need to add
       // the srcChild back
@@ -879,8 +874,7 @@ public class FSDirectory implements Closeable {
     // snapshot is taken on the dst tree, changes will be recorded in the latest
     // snapshot of the src tree.
     if (isSrcInSnapshot) {
-      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot(),
-          inodeMap);
+      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot());
       srcIIP.setLastINode(srcChild);
     }
 
@@ -958,11 +952,9 @@ public class FSDirectory implements Closeable {
     }
 
     final INode srcParent = srcIIP.getINode(-2);
-    srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot(),
-        inodeMap);
+    srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot());
     dstParent = dstIIP.getINode(-2);
-    dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot(),
-        inodeMap);
+    dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot());
     // update moved lease with new filename
     getFSNamesystem().unprotectedChangeLease(src, dst);
 
@@ -1019,9 +1011,9 @@ public class FSDirectory implements Closeable {
       withCount.getReferredINode().setLocalName(srcChildName);
     }
 
-    if (srcParent instanceof INodeDirectoryWithSnapshot) {
-      ((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent(
-          oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot());
+    if (srcParent.isWithSnapshot()) {
+      srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild,
+          srcIIP.getLatestSnapshot());
     } else {
       // srcParent is not an INodeDirectoryWithSnapshot, we only need to add
       // the srcChild back
@@ -1030,9 +1022,9 @@ public class FSDirectory implements Closeable {
     }
     if (undoRemoveDst) {
       // Rename failed - restore dst
-      if (dstParent instanceof INodeDirectoryWithSnapshot) {
-        ((INodeDirectoryWithSnapshot) dstParent).undoRename4DstParent(
-            removedDst, dstIIP.getLatestSnapshot());
+      if (dstParent.isDirectory() && dstParent.asDirectory().isWithSnapshot()) {
+        dstParent.asDirectory().undoRename4DstParent(removedDst,
+            dstIIP.getLatestSnapshot());
       } else {
         addLastINodeNoQuotaCheck(dstIIP, removedDst);
       }
@@ -1163,8 +1155,7 @@ public class FSDirectory implements Closeable {
     if (inode == null) {
       throw new FileNotFoundException("File does not exist: " + src);
     }
-    inode.setPermission(permissions, inodesInPath.getLatestSnapshot(),
-        inodeMap);
+    inode.setPermission(permissions, inodesInPath.getLatestSnapshot());
   }
 
   void setOwner(String src, String username, String groupname)
@@ -1189,11 +1180,10 @@ public class FSDirectory implements Closeable {
       throw new FileNotFoundException("File does not exist: " + src);
     }
     if (username != null) {
-      inode = inode.setUser(username, inodesInPath.getLatestSnapshot(),
-          inodeMap);
+      inode = inode.setUser(username, inodesInPath.getLatestSnapshot());
     }
     if (groupname != null) {
-      inode.setGroup(groupname, inodesInPath.getLatestSnapshot(), inodeMap);
+      inode.setGroup(groupname, inodesInPath.getLatestSnapshot());
     }
   }
 
@@ -1266,7 +1256,7 @@ public class FSDirectory implements Closeable {
       if(nodeToRemove == null) continue;
 
       nodeToRemove.setBlocks(null);
-      trgParent.removeChild(nodeToRemove, trgLatestSnapshot, null);
+      trgParent.removeChild(nodeToRemove, trgLatestSnapshot);
       inodeMap.remove(nodeToRemove);
      count++;
    }
@@ -1274,8 +1264,8 @@ public class FSDirectory implements Closeable {
     // update inodeMap
     removeFromInodeMap(Arrays.asList(allSrcInodes));
 
-    trgInode.setModificationTime(timestamp, trgLatestSnapshot, inodeMap);
-    trgParent.updateModificationTime(timestamp, trgLatestSnapshot, inodeMap);
+    trgInode.setModificationTime(timestamp, trgLatestSnapshot);
+    trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
     // update quota on the parent directory ('count' files removed, 0 space)
     unprotectedUpdateCount(trgIIP, trgINodes.length-1, -count, 0);
   }
@@ -1419,7 +1409,7 @@ public class FSDirectory implements Closeable {
 
     // record modification
     final Snapshot latestSnapshot = iip.getLatestSnapshot();
-    targetNode = targetNode.recordModification(latestSnapshot, inodeMap);
+    targetNode = targetNode.recordModification(latestSnapshot);
     iip.setLastINode(targetNode);
 
     // Remove the node from the namespace
@@ -1430,7 +1420,7 @@ public class FSDirectory implements Closeable {
 
     // set the parent's modification time
     final INodeDirectory parent = targetNode.getParent();
-    parent.updateModificationTime(mtime, latestSnapshot, inodeMap);
+    parent.updateModificationTime(mtime, latestSnapshot);
     if (removed == 0) {
       return 0;
     }
@@ -2203,8 +2193,7 @@ public class FSDirectory implements Closeable {
     final INodeDirectory parent = inodes[pos-1].asDirectory();
     boolean added = false;
     try {
-      added = parent.addChild(child, true, iip.getLatestSnapshot(),
-          inodeMap);
+      added = parent.addChild(child, true, iip.getLatestSnapshot());
     } catch (QuotaExceededException e) {
       updateCountNoQuotaCheck(iip, pos,
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
@@ -2242,7 +2231,7 @@ public class FSDirectory implements Closeable {
     final Snapshot latestSnapshot = iip.getLatestSnapshot();
     final INode last = iip.getLastINode();
     final INodeDirectory parent = iip.getINode(-2).asDirectory();
-    if (!parent.removeChild(last, latestSnapshot, inodeMap)) {
+    if (!parent.removeChild(last, latestSnapshot)) {
       return -1;
     }
     INodeDirectory newParent = last.getParent();
@@ -2394,7 +2383,7 @@ public class FSDirectory implements Closeable {
     }
 
     final Snapshot latest = iip.getLatestSnapshot();
-    dirNode = dirNode.recordModification(latest, inodeMap);
+    dirNode = dirNode.recordModification(latest);
     dirNode.setQuota(nsQuota, dsQuota);
     return dirNode;
   }
@@ -2462,7 +2451,7 @@ public class FSDirectory implements Closeable {
     assert hasWriteLock();
     boolean status = false;
     if (mtime != -1) {
-      inode = inode.setModificationTime(mtime, latest, inodeMap);
+      inode = inode.setModificationTime(mtime, latest);
       status = true;
     }
     if (atime != -1) {
@@ -2473,7 +2462,7 @@ public class FSDirectory implements Closeable {
       if (atime <= inodeTime + getFSNamesystem().getAccessTimePrecision() && !force) {
         status = false;
       } else {
-        inode.setAccessTime(atime, latest, inodeMap);
+        inode.setAccessTime(atime, latest);
         status = true;
       }
     }
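Every hunk in this file makes the same mechanical change: the INodeMap argument disappears from recordModification(..) and from the setters built on top of it. Under the DirectoryWithSnapshotFeature design, recording a modification annotates the existing inode instead of replacing it with a subclass instance, so callers no longer need to patch the inode map afterwards. A minimal model of that narrowed contract; ModelNode is hypothetical, not the real INode:

import java.util.ArrayList;
import java.util.List;

// Hypothetical model: recordModification(..) now mutates in place and
// returns the same object, so no inode-map fixup is needed by callers.
class ModelNode {
  private final List<String> diffs = new ArrayList<>();

  ModelNode recordModification(String latestSnapshot) {
    if (latestSnapshot != null) {
      diffs.add(latestSnapshot); // record state in a diff list, no object swap
    }
    return this; // always 'this' under the feature-based design
  }
}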
FSEditLogLoader.java
@@ -31,18 +31,18 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
@@ -55,11 +55,11 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -354,8 +354,8 @@ public class FSEditLogLoader {
       // update the block list.
 
       // Update the salient file attributes.
-      newFile.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
-      newFile.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
+      newFile.setAccessTime(addCloseOp.atime, null);
+      newFile.setModificationTime(addCloseOp.mtime, null);
       updateBlocks(fsDir, addCloseOp, newFile);
       break;
     }
@@ -373,8 +373,8 @@ public class FSEditLogLoader {
       final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
 
       // Update the salient file attributes.
-      file.setAccessTime(addCloseOp.atime, null, fsDir.getINodeMap());
-      file.setModificationTime(addCloseOp.mtime, null, fsDir.getINodeMap());
+      file.setAccessTime(addCloseOp.atime, null);
+      file.setModificationTime(addCloseOp.mtime, null);
       updateBlocks(fsDir, addCloseOp, file);
 
       // Now close the file
FSImageFormat.java
@@ -52,9 +52,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -731,9 +731,10 @@ public class FSImageFormat {
       if (nsQuota >= 0 || dsQuota >= 0) {
         dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
       }
-      return snapshottable ? new INodeDirectorySnapshottable(dir)
-          : withSnapshot ? new INodeDirectoryWithSnapshot(dir)
-          : dir;
+      if (withSnapshot) {
+        dir.addSnapshotFeature(null);
+      }
+      return snapshottable ? new INodeDirectorySnapshottable(dir) : dir;
     } else if (numBlocks == -2) {
       //symlink
 
@@ -1113,10 +1114,10 @@ public class FSImageFormat {
     final ReadOnlyList<INode> children = current.getChildrenList(null);
     int dirNum = 0;
     List<INodeDirectory> snapshotDirs = null;
-    if (current instanceof INodeDirectoryWithSnapshot) {
+    DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature();
+    if (sf != null) {
       snapshotDirs = new ArrayList<INodeDirectory>();
-      ((INodeDirectoryWithSnapshot) current).getSnapshotDirectory(
-          snapshotDirs);
+      sf.getSnapshotDirectory(snapshotDirs);
       dirNum += snapshotDirs.size();
     }
 
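The loader hunks above show the core idea of HDFS-5632: a snapshot-capable directory is no longer a distinct class (INodeDirectoryWithSnapshot) but the same INodeDirectory instance carrying a feature object. A compact sketch of composition replacing subclassing; Dir, SnapshotFeature and Loader are hypothetical stand-ins, not HDFS types:

import java.util.ArrayList;
import java.util.List;

// Hypothetical feature attached to a directory instead of wrapping it.
class SnapshotFeature {
  final Object diffs;
  SnapshotFeature(Object diffs) { this.diffs = diffs; }
}

class Dir {
  final List<Object> features = new ArrayList<>();

  void addSnapshotFeature(Object diffs) {
    features.add(new SnapshotFeature(diffs)); // was: wrap in a subclass
  }
}

class Loader {
  Dir loadDirectory(boolean withSnapshot) {
    Dir dir = new Dir();
    if (withSnapshot) {
      dir.addSnapshotFeature(null); // object identity is preserved either way
    }
    return dir;
  }
}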
FSImageSerialization.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.io.IntWritable;
@@ -239,7 +238,7 @@ public class FSImageSerialization {
       out.writeBoolean(true);
     } else {
       out.writeBoolean(false);
-      out.writeBoolean(node instanceof INodeDirectoryWithSnapshot);
+      out.writeBoolean(node.isWithSnapshot());
     }
 
     writePermissionStatus(node, out);
FSNamesystem.java
@@ -737,12 +737,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.dtSecretManager = createDelegationTokenSecretManager(conf);
       this.dir = new FSDirectory(fsImage, this, conf);
       this.snapshotManager = new SnapshotManager(dir);
-      writeLock();
-      try {
-        this.cacheManager = new CacheManager(this, conf, blockManager);
-      } finally {
-        writeUnlock();
-      }
+      this.cacheManager = new CacheManager(this, conf, blockManager);
       this.safeMode = new SafeModeInfo(conf);
       this.auditLoggers = initAuditLoggers(conf);
       this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
@@ -2297,7 +2292,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
       boolean writeToEditLog, Snapshot latestSnapshot, boolean logRetryCache)
       throws IOException {
-    file = file.recordModification(latestSnapshot, dir.getINodeMap());
+    file = file.recordModification(latestSnapshot);
     final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine,
         clientNode);
 
@@ -2879,8 +2874,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       writeUnlock();
     }
     getEditLog().logSync();
-    NameNode.stateChangeLog.info("DIR* completeFile: " + src + " is closed by "
-        + holder);
+    if (success) {
+      NameNode.stateChangeLog.info("DIR* completeFile: " + src
+          + " is closed by " + holder);
+    }
     return success;
   }
 
@@ -3785,8 +3782,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     Preconditions.checkArgument(uc != null);
     leaseManager.removeLease(uc.getClientName(), src);
 
-    pendingFile = pendingFile.recordModification(latestSnapshot,
-        dir.getINodeMap());
+    pendingFile = pendingFile.recordModification(latestSnapshot);
 
     // The file is no longer pending.
     // Create permanent INode, update blocks. No need to replace the inode here
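The first hunk above (HDFS-5665) removes the writeLock()/writeUnlock() pair around constructing the CacheManager: the constructor runs before the namesystem is handed to other threads, so the lock protected nothing. A minimal sketch of that safe-construction argument, assuming the object does not escape to another thread while it is being built; both classes are hypothetical:

// Hypothetical sketch: a field assigned inside a constructor needs no lock,
// provided no other thread can reach 'this' until construction completes and
// the instance is safely published afterwards.
class CacheManagerSketch {}

class NamesystemSketch {
  private final CacheManagerSketch cacheManager;

  NamesystemSketch() {
    this.cacheManager = new CacheManagerSketch(); // single-threaded, lock-free
  }
}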
INode.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.Diff;
@@ -96,9 +95,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   abstract void setUser(String user);
 
   /** Set user */
-  final INode setUser(String user, Snapshot latest, INodeMap inodeMap)
+  final INode setUser(String user, Snapshot latest)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latest, inodeMap);
+    final INode nodeToUpdate = recordModification(latest);
     nodeToUpdate.setUser(user);
     return nodeToUpdate;
   }
@@ -120,9 +119,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   abstract void setGroup(String group);
 
   /** Set group */
-  final INode setGroup(String group, Snapshot latest, INodeMap inodeMap)
+  final INode setGroup(String group, Snapshot latest)
       throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latest, inodeMap);
+    final INode nodeToUpdate = recordModification(latest);
     nodeToUpdate.setGroup(group);
     return nodeToUpdate;
   }
@@ -145,9 +144,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   abstract void setPermission(FsPermission permission);
 
   /** Set the {@link FsPermission} of this {@link INode} */
-  INode setPermission(FsPermission permission, Snapshot latest,
-      INodeMap inodeMap) throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latest, inodeMap);
+  INode setPermission(FsPermission permission, Snapshot latest)
+      throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latest);
     nodeToUpdate.setPermission(permission);
     return nodeToUpdate;
   }
@@ -231,14 +230,12 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    *
    * @param latest the latest snapshot that has been taken.
    *        Note that it is null if no snapshots have been taken.
-   * @param inodeMap while recording modification, the inode or its parent may
-   *                 get replaced, and the inodeMap needs to be updated.
    * @return The current inode, which usually is the same object of this inode.
    *         However, in some cases, this inode may be replaced with a new inode
    *         for maintaining snapshots. The current inode is then the new inode.
    */
-  abstract INode recordModification(final Snapshot latest,
-      final INodeMap inodeMap) throws QuotaExceededException;
+  abstract INode recordModification(final Snapshot latest)
+      throws QuotaExceededException;
 
   /** Check whether it's a reference. */
   public boolean isReference() {
@@ -318,7 +315,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * Call recordModification(..) to capture the current states.
    * Mark the INode as deleted.
    *
-   * 1.4 The current inode is a {@link INodeDirectoryWithSnapshot}.
+   * 1.4 The current inode is an {@link INodeDirectory} with snapshot feature.
    * Call recordModification(..) to capture the current states.
    * Destroy files/directories created after the latest snapshot
   * (i.e., the inodes stored in the created list of the latest snapshot).
@@ -329,7 +326,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * 2.2 To clean {@link INodeDirectory}: recursively clean its children.
    * 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in
    * its diff list.
-   * 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding
+   * 2.4 To clean {@link INodeDirectory} with snapshot: delete the corresponding
    * snapshot in its diff list. Recursively clean its children.
    * </pre>
    *
@@ -575,16 +572,16 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   }
 
   /** Update modification time if it is larger than the current value. */
-  public abstract INode updateModificationTime(long mtime, Snapshot latest,
-      INodeMap inodeMap) throws QuotaExceededException;
+  public abstract INode updateModificationTime(long mtime, Snapshot latest)
+      throws QuotaExceededException;
 
   /** Set the last modification time of inode. */
   public abstract void setModificationTime(long modificationTime);
 
   /** Set the last modification time of inode. */
   public final INode setModificationTime(long modificationTime,
-      Snapshot latest, INodeMap inodeMap) throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latest, inodeMap);
+      Snapshot latest) throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latest);
     nodeToUpdate.setModificationTime(modificationTime);
     return nodeToUpdate;
   }
@@ -611,9 +608,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   /**
    * Set last access time of inode.
    */
-  public final INode setAccessTime(long accessTime, Snapshot latest,
-      INodeMap inodeMap) throws QuotaExceededException {
-    final INode nodeToUpdate = recordModification(latest, inodeMap);
+  public final INode setAccessTime(long accessTime, Snapshot latest)
+      throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latest);
     nodeToUpdate.setAccessTime(accessTime);
     return nodeToUpdate;
   }
@@ -753,8 +750,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
     }
   }
 
-  /** INode feature such as {@link FileUnderConstructionFeature}
-   * and {@link DirectoryWithQuotaFeature}.
+  /**
+   * INode feature such as {@link FileUnderConstructionFeature}
+   * and {@link DirectoryWithQuotaFeature}.
    */
   public interface Feature {
   }
@ -32,9 +32,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
|
||||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||||
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
|
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
|
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
|
||||||
|
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
|
||||||
|
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
|
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
|
|
||||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||||
|
import org.apache.hadoop.hdfs.util.Diff.ListType;
|
||||||
import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
||||||
|
|
||||||
import com.google.common.annotations.VisibleForTesting;
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
@ -169,39 +171,48 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
||||||
return children == null? -1: Collections.binarySearch(children, name);
|
return children == null? -1: Collections.binarySearch(children, name);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
protected DirectoryWithSnapshotFeature addSnapshotFeature(
|
||||||
* Remove the specified child from this directory.
|
DirectoryDiffList diffs) {
|
||||||
*
|
Preconditions.checkState(!isWithSnapshot(),
|
||||||
* @param child the child inode to be removed
|
"Directory is already with snapshot");
|
||||||
* @param latest See {@link INode#recordModification(Snapshot, INodeMap)}.
|
DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(diffs);
|
||||||
*/
|
addFeature(sf);
|
||||||
public boolean removeChild(INode child, Snapshot latest,
|
return sf;
|
||||||
final INodeMap inodeMap) throws QuotaExceededException {
|
|
||||||
if (isInLatestSnapshot(latest)) {
|
|
||||||
return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
|
|
||||||
.removeChild(child, latest, inodeMap);
|
|
||||||
}
|
|
||||||
|
|
||||||
return removeChild(child);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Remove the specified child from this directory.
|
* If feature list contains a {@link DirectoryWithSnapshotFeature}, return it;
|
||||||
* The basic remove method which actually calls children.remove(..).
|
* otherwise, return null.
|
||||||
*
|
|
||||||
* @param child the child inode to be removed
|
|
||||||
*
|
|
||||||
* @return true if the child is removed; false if the child is not found.
|
|
||||||
*/
|
*/
|
||||||
protected final boolean removeChild(final INode child) {
|
public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() {
|
||||||
final int i = searchChildren(child.getLocalNameBytes());
|
for (Feature f : features) {
|
||||||
if (i < 0) {
|
if (f instanceof DirectoryWithSnapshotFeature) {
|
||||||
return false;
|
return (DirectoryWithSnapshotFeature) f;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
final INode removed = children.remove(i);
|
/** Is this file has the snapshot feature? */
|
||||||
Preconditions.checkState(removed == child);
|
public final boolean isWithSnapshot() {
|
||||||
return true;
|
return getDirectoryWithSnapshotFeature() != null;
|
||||||
|
}
|
||||||
|
|
||||||
|
public DirectoryDiffList getDiffs() {
|
||||||
|
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
|
||||||
|
return sf != null ? sf.getDiffs() : null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) {
|
||||||
|
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
|
||||||
|
return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshot, this);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toDetailString() {
|
||||||
|
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
|
||||||
|
return super.toDetailString() + (sf == null ? "" : ", " + sf.getDiffs());
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
|
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
|
||||||
|
@ -210,16 +221,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
||||||
Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
|
Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
|
||||||
"this is already an INodeDirectorySnapshottable, this=%s", this);
|
"this is already an INodeDirectorySnapshottable, this=%s", this);
|
||||||
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
|
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
|
||||||
replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
|
replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
|
||||||
|
.saveSelf2Snapshot(latest, s, this);
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Replace itself with an {@link INodeDirectoryWithSnapshot}. */
|
|
||||||
public INodeDirectoryWithSnapshot replaceSelf4INodeDirectoryWithSnapshot(
|
|
||||||
final INodeMap inodeMap) {
|
|
||||||
return replaceSelf(new INodeDirectoryWithSnapshot(this), inodeMap);
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Replace itself with {@link INodeDirectory}. */
|
/** Replace itself with {@link INodeDirectory}. */
|
||||||
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
|
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
|
||||||
Preconditions.checkState(getClass() != INodeDirectory.class,
|
Preconditions.checkState(getClass() != INodeDirectory.class,
|
||||||
|
@ -245,7 +251,13 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
||||||
return newDir;
|
return newDir;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Replace the given child with a new child. */
|
/**
|
||||||
|
* Replace the given child with a new child. Note that we no longer need to
|
||||||
|
* replace an normal INodeDirectory or INodeFile into an
|
||||||
|
* INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
|
||||||
|
* for child replacement is for {@link INodeDirectorySnapshottable} and
|
||||||
|
* reference nodes.
|
||||||
|
*/
|
||||||
public void replaceChild(INode oldChild, final INode newChild,
|
public void replaceChild(INode oldChild, final INode newChild,
|
||||||
final INodeMap inodeMap) {
|
final INodeMap inodeMap) {
|
||||||
Preconditions.checkNotNull(children);
|
Preconditions.checkNotNull(children);
|
||||||
|
@ -256,20 +268,20 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
||||||
.asReference().getReferredINode());
|
.asReference().getReferredINode());
|
||||||
oldChild = children.get(i);
|
oldChild = children.get(i);
|
||||||
|
|
||||||
if (oldChild.isReference() && !newChild.isReference()) {
|
if (oldChild.isReference() && newChild.isReference()) {
|
||||||
// replace the referred inode, e.g.,
|
// both are reference nodes, e.g., DstReference -> WithName
|
||||||
// INodeFileWithSnapshot -> INodeFileUnderConstructionWithSnapshot
|
final INodeReference.WithCount withCount =
|
||||||
final INode withCount = oldChild.asReference().getReferredINode();
|
(WithCount) oldChild.asReference().getReferredINode();
|
||||||
withCount.asReference().setReferredINode(newChild);
|
withCount.removeReference(oldChild.asReference());
|
||||||
} else {
|
|
||||||
if (oldChild.isReference()) {
|
|
||||||
// both are reference nodes, e.g., DstReference -> WithName
|
|
||||||
final INodeReference.WithCount withCount =
|
|
||||||
(WithCount) oldChild.asReference().getReferredINode();
|
|
||||||
withCount.removeReference(oldChild.asReference());
|
|
||||||
}
|
|
||||||
children.set(i, newChild);
|
|
||||||
}
|
}
|
||||||
|
children.set(i, newChild);
|
||||||
|
|
||||||
|
// replace the instance in the created list of the diff list
|
||||||
|
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
|
||||||
|
if (sf != null) {
|
||||||
|
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
|
||||||
|
}
|
||||||
|
|
||||||
// update the inodeMap
|
// update the inodeMap
|
||||||
if (inodeMap != null) {
|
if (inodeMap != null) {
|
||||||
inodeMap.put(newChild);
|
inodeMap.put(newChild);
|
||||||
|
@ -298,14 +310,18 @@ public class INodeDirectory extends INodeWithAdditionalFields
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public INodeDirectory recordModification(Snapshot latest,
|
public INodeDirectory recordModification(Snapshot latest)
|
||||||
final INodeMap inodeMap) throws QuotaExceededException {
|
throws QuotaExceededException {
|
||||||
if (isInLatestSnapshot(latest)) {
|
if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
|
||||||
return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
|
// add snapshot feature if necessary
|
||||||
.recordModification(latest, inodeMap);
|
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
|
||||||
} else {
|
if (sf == null) {
|
||||||
return this;
|
sf = addSnapshotFeature(null);
|
||||||
|
}
|
||||||
|
// record self in the diff list if necessary
|
||||||
|
sf.getDiffs().saveSelf2Snapshot(latest, this, null);
|
||||||
}
|
}
|
||||||
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -314,13 +330,17 @@ public class INodeDirectory extends INodeWithAdditionalFields
    * @return the child inode, which may be replaced.
    */
   public INode saveChild2Snapshot(final INode child, final Snapshot latest,
-      final INode snapshotCopy, final INodeMap inodeMap)
-      throws QuotaExceededException {
+      final INode snapshotCopy) throws QuotaExceededException {
     if (latest == null) {
       return child;
     }
-    return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
-        .saveChild2Snapshot(child, latest, snapshotCopy, inodeMap);
+
+    // add snapshot feature if necessary
+    DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    if (sf == null) {
+      sf = this.addSnapshotFeature(null);
+    }
+    return sf.saveChild2Snapshot(this, child, latest, snapshotCopy);
   }
 
   /**
@@ -331,9 +351,36 @@ public class INodeDirectory extends INodeWithAdditionalFields
    * @return the child inode.
    */
   public INode getChild(byte[] name, Snapshot snapshot) {
-    final ReadOnlyList<INode> c = getChildrenList(snapshot);
-    final int i = ReadOnlyList.Util.binarySearch(c, name);
-    return i < 0? null: c.get(i);
+    DirectoryWithSnapshotFeature sf;
+    if (snapshot == null || (sf = getDirectoryWithSnapshotFeature()) == null) {
+      ReadOnlyList<INode> c = getCurrentChildrenList();
+      final int i = ReadOnlyList.Util.binarySearch(c, name);
+      return i < 0 ? null : c.get(i);
+    }
+
+    return sf.getChild(this, name, snapshot);
+  }
+
+  /**
+   * @param snapshot
+   *          if it is not null, get the result from the given snapshot;
+   *          otherwise, get the result from the current directory.
+   * @return the current children list if the specified snapshot is null;
+   *         otherwise, return the children list corresponding to the snapshot.
+   *         Note that the returned list is never null.
+   */
+  public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
+    DirectoryWithSnapshotFeature sf;
+    if (snapshot == null
+        || (sf = this.getDirectoryWithSnapshotFeature()) == null) {
+      return getCurrentChildrenList();
+    }
+    return sf.getChildrenList(this, snapshot);
+  }
+
+  private ReadOnlyList<INode> getCurrentChildrenList() {
+    return children == null ? ReadOnlyList.Util.<INode> emptyList()
+        : ReadOnlyList.Util.asReadOnlyList(children);
   }
 
   /** @return the {@link INodesInPath} containing only the last inode. */
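getChild(..) above now has two lookup paths: a binary search over the sorted current children, or a delegation to the snapshot feature when a snapshot is given. A toy model of that split, with hypothetical names rather than HDFS API:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

class ChildLookupSketch {
  static String getChild(List<String> currentChildren,
      Map<String, List<String>> snapshotViews, String name, String snapshot) {
    List<String> c = currentChildren;
    if (snapshot != null && snapshotViews.containsKey(snapshot)) {
      c = snapshotViews.get(snapshot); // children as of that snapshot
    }
    int i = Collections.binarySearch(c, name);
    return i < 0 ? null : c.get(i);
  }

  public static void main(String[] args) {
    List<String> now = Arrays.asList("a", "c");
    Map<String, List<String>> snaps =
        Collections.singletonMap("s1", Arrays.asList("a", "b"));
    System.out.println(getChild(now, snaps, "b", null)); // null: gone today
    System.out.println(getChild(now, snaps, "b", "s1")); // b: existed in s1
  }
}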
@@ -400,6 +447,41 @@ public class INodeDirectory extends INodeWithAdditionalFields
     return -nextPos;
   }
 
+  /**
+   * Remove the specified child from this directory.
+   */
+  public boolean removeChild(INode child, Snapshot latest)
+      throws QuotaExceededException {
+    if (isInLatestSnapshot(latest)) {
+      // create snapshot feature if necessary
+      DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
+      if (sf == null) {
+        sf = this.addSnapshotFeature(null);
+      }
+      return sf.removeChild(this, child, latest);
+    }
+    return removeChild(child);
+  }
+
+  /**
+   * Remove the specified child from this directory.
+   * The basic remove method which actually calls children.remove(..).
+   *
+   * @param child the child inode to be removed
+   *
+   * @return true if the child is removed; false if the child is not found.
+   */
+  public boolean removeChild(final INode child) {
+    final int i = searchChildren(child.getLocalNameBytes());
+    if (i < 0) {
+      return false;
+    }
+
+    final INode removed = children.remove(i);
+    Preconditions.checkState(removed == child);
+    return true;
+  }
+
   /**
    * Add a child inode to the directory.
    *
@@ -407,34 +489,32 @@ public class INodeDirectory extends INodeWithAdditionalFields
    * @param setModTime set modification time for the parent node
    *                   not needed when replaying the addition and
    *                   the parent already has the proper mod time
-   * @param inodeMap update the inodeMap if the directory node gets replaced
    * @return false if the child with this name already exists;
    *         otherwise, return true;
    */
   public boolean addChild(INode node, final boolean setModTime,
-      final Snapshot latest, final INodeMap inodeMap)
-      throws QuotaExceededException {
+      final Snapshot latest) throws QuotaExceededException {
     final int low = searchChildren(node.getLocalNameBytes());
     if (low >= 0) {
       return false;
     }
 
     if (isInLatestSnapshot(latest)) {
-      INodeDirectoryWithSnapshot sdir =
-          replaceSelf4INodeDirectoryWithSnapshot(inodeMap);
-      boolean added = sdir.addChild(node, setModTime, latest, inodeMap);
-      return added;
+      // create snapshot feature if necessary
+      DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
+      if (sf == null) {
+        sf = this.addSnapshotFeature(null);
+      }
+      return sf.addChild(this, node, setModTime, latest);
     }
     addChild(node, low);
     if (setModTime) {
       // update modification time of the parent directory
-      updateModificationTime(node.getModificationTime(), latest, inodeMap);
+      updateModificationTime(node.getModificationTime(), latest);
     }
     return true;
   }
 
 
-  /** The same as addChild(node, false, null, false) */
   public boolean addChild(INode node) {
     final int low = searchChildren(node.getLocalNameBytes());
     if (low >= 0) {
@@ -463,21 +543,34 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
       int lastSnapshotId) {
-    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
-    if (q != null) {
-      if (useCache && isQuotaSet()) {
-        q.addNamespaceDiskspace(counts);
-      } else {
-        computeDirectoryQuotaUsage(counts, false, lastSnapshotId);
+    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+
+    // we are computing the quota usage for a specific snapshot here, i.e., the
+    // computation only includes files/directories that exist at the time of the
+    // given snapshot
+    if (sf != null && lastSnapshotId != Snapshot.INVALID_ID
+        && !(useCache && isQuotaSet())) {
+      Snapshot lastSnapshot = sf.getDiffs().getSnapshotById(lastSnapshotId);
+      ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
+      for (INode child : childrenList) {
+        child.computeQuotaUsage(counts, useCache, lastSnapshotId);
       }
+      counts.add(Quota.NAMESPACE, 1);
       return counts;
+    }
+
+    // compute the quota usage in the scope of the current directory tree
+    final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
+    if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
+      return q.addNamespaceDiskspace(counts);
     } else {
+      useCache = q != null && !q.isQuotaSet() ? false : useCache;
       return computeDirectoryQuotaUsage(counts, useCache, lastSnapshotId);
     }
   }
 
-  Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts, boolean useCache,
-      int lastSnapshotId) {
+  private Quota.Counts computeDirectoryQuotaUsage(Quota.Counts counts,
+      boolean useCache, int lastSnapshotId) {
     if (children != null) {
       for (INode child : children) {
         child.computeQuotaUsage(counts, useCache, lastSnapshotId);
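The snapshot branch added above walks the children that existed at the given snapshot and charges one namespace unit for the directory itself. A hedged, self-contained sketch of that arithmetic, using toy types rather than the HDFS ones:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class QuotaSketch {
  static class Node {
    final List<Node> childrenAtSnapshot;
    Node(List<Node> children) { this.childrenAtSnapshot = children; }

    long computeNamespaceUsage() {
      long counts = 1; // this directory/file occupies one namespace slot
      for (Node child : childrenAtSnapshot) {
        counts += child.computeNamespaceUsage();
      }
      return counts;
    }
  }

  public static void main(String[] args) {
    Node file = new Node(Collections.<Node>emptyList());
    Node dir = new Node(
        Arrays.asList(file, new Node(Collections.<Node>emptyList())));
    System.out.println(dir.computeNamespaceUsage()); // 3 = dir + 2 children
  }
}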
@@ -489,12 +582,21 @@ public class INodeDirectory extends INodeWithAdditionalFields
   /** Add quota usage for this inode excluding children. */
   public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
     counts.add(Quota.NAMESPACE, 1);
+    // include the diff list
+    DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    if (sf != null) {
+      sf.computeQuotaUsage4CurrentDirectory(counts);
+    }
     return counts;
   }
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(
       ContentSummaryComputationContext summary) {
+    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    if (sf != null) {
+      sf.computeContentSummary4Snapshot(summary.getCounts());
+    }
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
     if (q != null) {
       return q.computeContentSummary(this, summary);
@@ -521,13 +623,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
       if (lastYieldCount == summary.getYieldCount()) {
         continue;
       }
 
       // The locks were released and reacquired. Check parent first.
       if (getParent() == null) {
         // Stop further counting and return whatever we have so far.
         break;
       }
 
       // Obtain the children list again since it may have been modified.
       childrenList = getChildrenList(null);
       // Reposition in case the children list is changed. Decrement by 1
@@ -537,24 +637,77 @@ public class INodeDirectory extends INodeWithAdditionalFields
 
     // Increment the directory count for this directory.
     summary.getCounts().add(Content.DIRECTORY, 1);
 
     // Relinquish and reacquire locks if necessary.
     summary.yield();
 
     return summary;
   }
 
   /**
-   * @param snapshot
-   *          if it is not null, get the result from the given snapshot;
-   *          otherwise, get the result from the current directory.
-   * @return the current children list if the specified snapshot is null;
-   *         otherwise, return the children list corresponding to the snapshot.
-   *         Note that the returned list is never null.
+   * This method is usually called by the undo section of rename.
+   *
+   * Before calling this function, in the rename operation, we replace the
+   * original src node (of the rename operation) with a reference node (WithName
+   * instance) in both the children list and a created list, delete the
+   * reference node from the children list, and add it to the corresponding
+   * deleted list.
+   *
+   * To undo the above operations, we have the following steps in particular:
+   *
+   * <pre>
+   * 1) remove the WithName node from the deleted list (if it exists)
+   * 2) replace the WithName node in the created list with srcChild
+   * 3) add srcChild back as a child of srcParent. Note that we already add
+   * the node into the created list of a snapshot diff in step 2, we do not need
+   * to add srcChild to the created list of the latest snapshot.
+   * </pre>
+   *
+   * We do not need to update quota usage because the old child is in the
+   * deleted list before.
+   *
+   * @param oldChild
+   *          The reference node to be removed/replaced
+   * @param newChild
+   *          The node to be added back
+   * @param latestSnapshot
+   *          The latest snapshot. Note this may not be the last snapshot in the
+   *          diff list, since the src tree of the current rename operation
+   *          may be the dst tree of a previous rename.
+   * @throws QuotaExceededException should not throw this exception
    */
-  public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
-    return children == null ? ReadOnlyList.Util.<INode>emptyList()
-        : ReadOnlyList.Util.asReadOnlyList(children);
+  public void undoRename4ScrParent(final INodeReference oldChild,
+      final INode newChild, Snapshot latestSnapshot)
+      throws QuotaExceededException {
+    DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    Preconditions.checkState(sf != null,
+        "Directory does not have snapshot feature");
+    sf.getDiffs().removeChild(ListType.DELETED, oldChild);
+    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+    addChild(newChild, true, null);
+  }
+
+  /**
+   * Undo the rename operation for the dst tree, i.e., if the rename operation
+   * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
+   * and delete possible record in the deleted list.
+   */
+  public void undoRename4DstParent(final INode deletedChild,
+      Snapshot latestSnapshot) throws QuotaExceededException {
+    DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    Preconditions.checkState(sf != null,
+        "Directory does not have snapshot feature");
+    boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
+        deletedChild);
+    // pass null for inodeMap since the parent node will not get replaced when
+    // undoing rename
+    final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
+        : latestSnapshot);
+    // update quota usage if adding is successfully and the old child has not
+    // been stored in deleted list before
+    if (added && !removeDeletedChild) {
+      final Quota.Counts counts = deletedChild.computeQuotaUsage();
+      addSpaceConsumed(counts.get(Quota.NAMESPACE),
+          counts.get(Quota.DISKSPACE), false);
+    }
   }
 
   /** Set the children list to null. */
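The new javadoc enumerates three undo steps. A runnable toy of those steps, with strings standing in for inodes and reference nodes (none of this is HDFS API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class UndoRenameSketch {
  static void undoRename4SrcParent(List<String> createdList,
      List<String> deletedList, List<String> children,
      String withNameRef, String srcChild) {
    deletedList.remove(withNameRef);            // 1) drop ref from deleted list
    int i = createdList.indexOf(withNameRef);
    if (i >= 0) {
      createdList.set(i, srcChild);             // 2) swap ref for the original
    }
    children.add(srcChild);                     // 3) re-attach to the parent
    Collections.sort(children);                 //    (children kept sorted)
  }

  public static void main(String[] args) {
    List<String> created = new ArrayList<String>(Arrays.asList("ref:foo"));
    List<String> deleted = new ArrayList<String>(Arrays.asList("ref:foo"));
    List<String> children = new ArrayList<String>(Arrays.asList("bar"));
    undoRename4SrcParent(created, deleted, children, "ref:foo", "foo");
    System.out.println(created);  // [foo]
    System.out.println(deleted);  // []
    System.out.println(children); // [bar, foo]
  }
}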
@@ -578,7 +731,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
     // the diff list, the snapshot to be deleted has been combined or renamed
     // to its latest previous snapshot. (besides, we also need to consider nodes
     // created after prior but before snapshot. this will be done in
-    // INodeDirectoryWithSnapshot#cleanSubtree)
+    // DirectoryWithSnapshotFeature)
     Snapshot s = snapshot != null && prior != null ? prior : snapshot;
     for (INode child : getChildrenList(s)) {
       if (snapshot != null && excludedNodes != null
@@ -596,6 +749,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes) {
+    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    if (sf != null) {
+      sf.clear(this, collectedBlocks, removedINodes);
+    }
     for (INode child : getChildrenList(null)) {
       child.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
@@ -608,6 +765,13 @@ public class INodeDirectory extends INodeWithAdditionalFields
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
+    DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
+    // there is snapshot data
+    if (sf != null) {
+      return sf.cleanDirectory(this, snapshot, prior, collectedBlocks,
+          removedINodes, countDiffChange);
+    }
+    // there is no snapshot data
     if (prior == null && snapshot == null) {
       // destroy the whole subtree and collect blocks that should be deleted
       Quota.Counts counts = Quota.Counts.newInstance();
@@ -27,7 +27,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.blockmanagement.*;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@@ -246,6 +250,8 @@ public class INodeFile extends INodeWithAdditionalFields
   /* Start of Snapshot Feature */
 
   private FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
+    Preconditions.checkState(!isWithSnapshot(),
+        "File is already with snapshot");
     FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
     this.addFeature(sf);
     return sf;
@@ -279,25 +285,23 @@ public class INodeFile extends INodeWithAdditionalFields
   public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) {
     FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
     if (sf != null) {
-      return sf.getSnapshotINode(this, snapshot);
+      return sf.getDiffs().getSnapshotINode(snapshot, this);
     } else {
       return this;
     }
   }
 
   @Override
-  public INodeFile recordModification(final Snapshot latest,
-      final INodeMap inodeMap) throws QuotaExceededException {
-    if (isInLatestSnapshot(latest)) {
+  public INodeFile recordModification(final Snapshot latest)
+      throws QuotaExceededException {
+    if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
       // the file is in snapshot, create a snapshot feature if it does not have
       FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
       if (sf == null) {
         sf = addSnapshotFeature(null);
       }
       // record self in the diff list if necessary
-      if (!shouldRecordInSrcSnapshot(latest)) {
-        sf.getDiffs().saveSelf2Snapshot(latest, this, null);
-      }
+      sf.getDiffs().saveSelf2Snapshot(latest, this, null);
     }
     return this;
   }
@@ -349,7 +353,7 @@ public class INodeFile extends INodeWithAdditionalFields
   /** Set the replication factor of this file. */
   public final INodeFile setFileReplication(short replication, Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
-    final INodeFile nodeToUpdate = recordModification(latest, inodeMap);
+    final INodeFile nodeToUpdate = recordModification(latest);
     nodeToUpdate.setFileReplication(replication);
     return nodeToUpdate;
   }
@@ -89,8 +89,7 @@ public class INodeMap {
       "", "", new FsPermission((short) 0)), 0, 0) {
 
     @Override
-    INode recordModification(Snapshot latest, INodeMap inodeMap)
-        throws QuotaExceededException {
+    INode recordModification(Snapshot latest) throws QuotaExceededException {
       return null;
     }
@@ -26,7 +26,7 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
@@ -103,9 +103,12 @@ public abstract class INodeReference extends INode {
       INode referred = wc.getReferredINode();
       if (referred.isFile() && referred.asFile().isWithSnapshot()) {
         return referred.asFile().getDiffs().getPrior(wn.lastSnapshotId);
-      } else if (referred instanceof INodeDirectoryWithSnapshot) {
-        return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
-            wn.lastSnapshotId);
+      } else if (referred.isDirectory()) {
+        DirectoryWithSnapshotFeature sf = referred.asDirectory()
+            .getDirectoryWithSnapshotFeature();
+        if (sf != null) {
+          return sf.getDiffs().getPrior(wn.lastSnapshotId);
+        }
       }
     }
     return null;
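The same rewrite recurs throughout INodeReference: an instanceof test against the removed subclass becomes an isDirectory() check plus a nullable feature lookup. A hedged sketch with stand-in types, not the HDFS classes:

class InstanceofToFeatureSketch {
  static class SnapshotFeature {
    Integer getPrior(int lastSnapshotId) {
      if (lastSnapshotId <= 0) {
        return null;
      }
      return lastSnapshotId - 1; // toy rule: prior id is just id - 1
    }
  }

  static class Node {
    final boolean isDirectory;
    final SnapshotFeature snapshotFeature; // null = no snapshot data
    Node(boolean isDirectory, SnapshotFeature sf) {
      this.isDirectory = isDirectory;
      this.snapshotFeature = sf;
    }
  }

  static Integer priorSnapshot(Node referred, int lastSnapshotId) {
    if (referred.isDirectory) {
      SnapshotFeature sf = referred.snapshotFeature;
      if (sf != null) {
        return sf.getPrior(lastSnapshotId);
      }
    }
    return null; // plain directory behaves like the old non-snapshot class
  }

  public static void main(String[] args) {
    System.out.println(priorSnapshot(new Node(true, new SnapshotFeature()), 3)); // 2
    System.out.println(priorSnapshot(new Node(true, null), 3));                  // null
  }
}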
@@ -231,9 +234,9 @@ public abstract class INodeReference extends INode {
   }
 
   @Override
-  public final INode updateModificationTime(long mtime, Snapshot latest,
-      INodeMap inodeMap) throws QuotaExceededException {
-    return referred.updateModificationTime(mtime, latest, inodeMap);
+  public final INode updateModificationTime(long mtime, Snapshot latest)
+      throws QuotaExceededException {
+    return referred.updateModificationTime(mtime, latest);
   }
 
   @Override
@@ -252,9 +255,9 @@ public abstract class INodeReference extends INode {
   }
 
   @Override
-  final INode recordModification(Snapshot latest, final INodeMap inodeMap)
+  final INode recordModification(Snapshot latest)
       throws QuotaExceededException {
-    referred.recordModification(latest, inodeMap);
+    referred.recordModification(latest);
     // reference is never replaced
     return this;
   }
@@ -547,9 +550,12 @@ public abstract class INodeReference extends INode {
       Snapshot snapshot = null;
       if (referred.isFile() && referred.asFile().isWithSnapshot()) {
         snapshot = referred.asFile().getDiffs().getPrior(lastSnapshotId);
-      } else if (referred instanceof INodeDirectoryWithSnapshot) {
-        snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
-            lastSnapshotId);
+      } else if (referred.isDirectory()) {
+        DirectoryWithSnapshotFeature sf = referred.asDirectory()
+            .getDirectoryWithSnapshotFeature();
+        if (sf != null) {
+          snapshot = sf.getDiffs().getPrior(lastSnapshotId);
+        }
       }
       return snapshot;
     }
@@ -634,10 +640,11 @@ public abstract class INodeReference extends INode {
       Snapshot snapshot = getSelfSnapshot(prior);
 
       INode referred = getReferredINode().asReference().getReferredINode();
-      if (referred.isFile() && referred.asFile().isWithSnapshot()) {
-        // if referred is a file, it must be a file with Snapshot since we did
+      if (referred.isFile()) {
+        // if referred is a file, it must be a file with snapshot since we did
         // recordModification before the rename
         INodeFile file = referred.asFile();
+        Preconditions.checkState(file.isWithSnapshot());
         // make sure we mark the file as deleted
         file.getFileWithSnapshotFeature().deleteCurrentFile();
         try {
@@ -649,14 +656,14 @@ public abstract class INodeReference extends INode {
         } catch (QuotaExceededException e) {
           LOG.error("should not exceed quota while snapshot deletion", e);
         }
-      } else if (referred instanceof INodeDirectoryWithSnapshot) {
+      } else if (referred.isDirectory()) {
         // similarly, if referred is a directory, it must be an
-        // INodeDirectoryWithSnapshot
-        INodeDirectoryWithSnapshot sdir =
-            (INodeDirectoryWithSnapshot) referred;
+        // INodeDirectory with snapshot
+        INodeDirectory dir = referred.asDirectory();
+        Preconditions.checkState(dir.isWithSnapshot());
         try {
-          INodeDirectoryWithSnapshot.destroyDstSubtree(sdir, snapshot, prior,
-              collectedBlocks, removedINodes);
+          DirectoryWithSnapshotFeature.destroyDstSubtree(dir, snapshot,
+              prior, collectedBlocks, removedINodes);
         } catch (QuotaExceededException e) {
           LOG.error("should not exceed quota while snapshot deletion", e);
         }
@@ -670,9 +677,12 @@ public abstract class INodeReference extends INode {
       Snapshot lastSnapshot = null;
       if (referred.isFile() && referred.asFile().isWithSnapshot()) {
         lastSnapshot = referred.asFile().getDiffs().getLastSnapshot();
-      } else if (referred instanceof INodeDirectoryWithSnapshot) {
-        lastSnapshot = ((INodeDirectoryWithSnapshot) referred)
-            .getLastSnapshot();
+      } else if (referred.isDirectory()) {
+        DirectoryWithSnapshotFeature sf = referred.asDirectory()
+            .getDirectoryWithSnapshotFeature();
+        if (sf != null) {
+          lastSnapshot = sf.getLastSnapshot();
+        }
       }
       if (lastSnapshot != null && !lastSnapshot.equals(prior)) {
         return lastSnapshot;
@@ -45,11 +45,10 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   }
 
   @Override
-  INode recordModification(Snapshot latest, final INodeMap inodeMap)
-      throws QuotaExceededException {
+  INode recordModification(Snapshot latest) throws QuotaExceededException {
     if (isInLatestSnapshot(latest)) {
       INodeDirectory parent = getParent();
-      parent.saveChild2Snapshot(this, latest, new INodeSymlink(this), inodeMap);
+      parent.saveChild2Snapshot(this, latest, new INodeSymlink(this));
     }
     return this;
   }
@@ -231,13 +231,13 @@ public abstract class INodeWithAdditionalFields extends INode
 
   /** Update modification time if it is larger than the current value. */
   @Override
-  public final INode updateModificationTime(long mtime, Snapshot latest,
-      final INodeMap inodeMap) throws QuotaExceededException {
+  public final INode updateModificationTime(long mtime, Snapshot latest)
+      throws QuotaExceededException {
     Preconditions.checkState(isDirectory());
     if (mtime <= modificationTime) {
       return this;
     }
-    return setModificationTime(mtime, latest, inodeMap);
+    return setModificationTime(mtime, latest);
   }
 
   final void cloneModificationTime(INodeWithAdditionalFields that) {
@@ -26,8 +26,8 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
@@ -132,11 +132,11 @@ public class INodesInPath {
     final boolean isRef = curNode.isReference();
     final boolean isDir = curNode.isDirectory();
     final INodeDirectory dir = isDir? curNode.asDirectory(): null;
-    if (!isRef && isDir && dir instanceof INodeDirectoryWithSnapshot) {
+    if (!isRef && isDir && dir.isWithSnapshot()) {
       //if the path is a non-snapshot path, update the latest snapshot.
       if (!existing.isSnapshot()) {
-        existing.updateLatestSnapshot(
-            ((INodeDirectoryWithSnapshot)dir).getLastSnapshot());
+        existing.updateLatestSnapshot(dir.getDirectoryWithSnapshotFeature()
+            .getLastSnapshot());
       }
     } else if (isRef && isDir && !lastComp) {
       // If the curNode is a reference node, need to check its dstSnapshot:
@@ -155,10 +155,10 @@ public class INodesInPath {
       if (latest == null || // no snapshot in dst tree of rename
           dstSnapshotId >= latest.getId()) { // the above scenario
         Snapshot lastSnapshot = null;
-        if (curNode.isDirectory()
-            && curNode.asDirectory() instanceof INodeDirectoryWithSnapshot) {
-          lastSnapshot = ((INodeDirectoryWithSnapshot) curNode
-              .asDirectory()).getLastSnapshot();
+        DirectoryWithSnapshotFeature sf = null;
+        if (curNode.isDirectory() &&
+            (sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
+          lastSnapshot = sf.getLastSnapshot();
         }
         existing.setSnapshot(lastSnapshot);
       }
@@ -1008,6 +1008,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
       StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
     verifyRequest(nodeReg);
+    metrics.incrBlockReceivedAndDeletedOps();
     if(blockStateChangeLog.isDebugEnabled()) {
       blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
           +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
@@ -71,6 +71,8 @@ public class NameNodeMetrics {
   MutableCounterLong listSnapshottableDirOps;
   @Metric("Number of snapshotDiffReport operations")
   MutableCounterLong snapshotDiffReportOps;
+  @Metric("Number of blockReceivedAndDeleted calls")
+  MutableCounterLong blockReceivedAndDeletedOps;
 
   @Metric("Journal transactions") MutableRate transactions;
   @Metric("Journal syncs") MutableRate syncs;
@@ -209,6 +211,10 @@ public class NameNodeMetrics {
     snapshotDiffReportOps.incr();
   }
 
+  public void incrBlockReceivedAndDeletedOps() {
+    blockReceivedAndDeletedOps.incr();
+  }
+
   public void addTransaction(long latency) {
     transactions.add(latency);
   }
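These two hunks follow the usual Metrics2 pattern: declare the counter once, expose an incr method, and bump it at the top of the RPC handler. A minimal sketch, with AtomicLong standing in for MutableCounterLong and the wiring purely illustrative:

import java.util.concurrent.atomic.AtomicLong;

class MetricsSketch {
  private final AtomicLong blockReceivedAndDeletedOps = new AtomicLong();

  void incrBlockReceivedAndDeletedOps() {
    blockReceivedAndDeletedOps.incrementAndGet();
  }

  // Handler side: count the call before doing the work, mirroring
  // blockReceivedAndDeleted(..) above.
  void blockReceivedAndDeleted(Object[] receivedAndDeletedBlocks) {
    incrBlockReceivedAndDeletedOps();
    // ... process the per-storage reports ...
  }

  long currentCount() {
    return blockReceivedAndDeletedOps.get();
  }
}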
@@ -98,7 +98,7 @@ abstract class AbstractINodeDiff<N extends INode,
   }
 
   /** Save the INode state to the snapshot if it is not done already. */
-  void saveSnapshotCopy(A snapshotCopy, N currentINode) {
+  void saveSnapshotCopy(A snapshotCopy) {
     Preconditions.checkState(snapshotINode == null, "Expected snapshotINode to be null");
     snapshotINode = snapshotCopy;
   }
@@ -25,8 +25,8 @@ import java.util.List;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 
 /**
@@ -271,7 +271,7 @@ abstract class AbstractINodeDiffList<N extends INode,
    * Note that the current inode is returned if there is no change
    * between the given snapshot and the current state.
    */
-  A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
+  public A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
     final D diff = getDiff(snapshot);
     final A inode = diff == null? null: diff.getSnapshotINode();
     return inode == null? currentINode: inode;
@@ -306,7 +306,7 @@ abstract class AbstractINodeDiffList<N extends INode,
       if (snapshotCopy == null) {
         snapshotCopy = createSnapshotCopy(currentINode);
       }
-      diff.saveSnapshotCopy(snapshotCopy, currentINode);
+      diff.saveSnapshotCopy(snapshotCopy);
     }
   }
 }
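saveSnapshotCopy enforces copy-on-first-write: a diff stores at most one snapshot copy of the inode state, captured the first time the inode changes after the snapshot, and reads fall back to the current state when no copy was saved. A toy illustration with hypothetical types:

class DiffCopySketch {
  static class Diff {
    private String snapshotCopy; // state as of the snapshot; null = unchanged

    void saveSnapshotCopy(String copy) {
      if (snapshotCopy != null) {
        throw new IllegalStateException("Expected snapshotCopy to be null");
      }
      snapshotCopy = copy;
    }

    String read(String current) {
      return snapshotCopy == null ? current : snapshotCopy;
    }
  }

  public static void main(String[] args) {
    Diff d = new Diff();
    System.out.println(d.read("v2")); // v2: nothing saved yet
    d.saveSnapshotCopy("v1");         // first modification captures old state
    System.out.println(d.read("v2")); // v1: snapshot view is pinned
  }
}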
@@ -28,6 +28,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
@@ -35,10 +36,10 @@ import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -51,11 +52,10 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Preconditions;
 
 /**
- * The directory with snapshots. It maintains a list of snapshot diffs for
- * storing snapshot data. When there are modifications to the directory, the old
- * data is stored in the latest snapshot, if there is any.
+ * Feature for directory with snapshot-related information.
  */
-public class INodeDirectoryWithSnapshot extends INodeDirectory {
+@InterfaceAudience.Private
+public class DirectoryWithSnapshotFeature implements INode.Feature {
   /**
    * The difference between the current state and a previous snapshot
    * of the children list of an INodeDirectory.
@@ -95,8 +95,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
     }
 
     /** clear the created list */
-    private Quota.Counts destroyCreatedList(
-        final INodeDirectoryWithSnapshot currentINode,
+    private Quota.Counts destroyCreatedList(final INodeDirectory currentINode,
         final BlocksMapUpdateInfo collectedBlocks,
         final List<INode> removedINodes) {
       Quota.Counts counts = Quota.Counts.newInstance();
@@ -166,13 +165,12 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
     /**
      * Interpret the diff and generate a list of {@link DiffReportEntry}.
      * @param parentPath The relative path of the parent.
-     * @param parent The directory that the diff belongs to.
     * @param fromEarlier True indicates {@code diff=later-earlier},
      *                    False indicates {@code diff=earlier-later}
      * @return A list of {@link DiffReportEntry} as the diff report.
      */
     public List<DiffReportEntry> generateReport(byte[][] parentPath,
-        INodeDirectoryWithSnapshot parent, boolean fromEarlier) {
+        boolean fromEarlier) {
       List<DiffReportEntry> cList = new ArrayList<DiffReportEntry>();
       List<DiffReportEntry> dList = new ArrayList<DiffReportEntry>();
       int c = 0, d = 0;
@@ -277,14 +275,15 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
      * Since the snapshot is read-only, the logical view of the list is
      * never changed although the internal data structure may mutate.
      */
-    ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
+    private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
      return new ReadOnlyList<INode>() {
         private List<INode> children = null;
 
         private List<INode> initChildren() {
           if (children == null) {
             final ChildrenDiff combined = new ChildrenDiff();
-            for(DirectoryDiff d = DirectoryDiff.this; d != null; d = d.getPosterior()) {
+            for (DirectoryDiff d = DirectoryDiff.this; d != null;
+                d = d.getPosterior()) {
               combined.combinePosterior(d.diff, null);
             }
             children = combined.apply2Current(ReadOnlyList.Util.asList(
@@ -386,7 +385,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
     }
 
     /** Replace the given child in the created/deleted list, if there is any. */
-    private boolean replaceChild(final ListType type, final INode oldChild,
+    public boolean replaceChild(final ListType type, final INode oldChild,
         final INode newChild) {
       final List<DirectoryDiff> diffList = asList();
       for(int i = diffList.size() - 1; i >= 0; i--) {
@@ -399,7 +398,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
     }
 
     /** Remove the given child in the created/deleted list, if there is any. */
-    private boolean removeChild(final ListType type, final INode child) {
+    public boolean removeChild(final ListType type, final INode child) {
       final List<DirectoryDiff> diffList = asList();
       for(int i = diffList.size() - 1; i >= 0; i--) {
         final ChildrenDiff diff = diffList.get(i).diff;
@@ -411,6 +410,274 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
       }
     }
 
+  private static Map<INode, INode> cloneDiffList(List<INode> diffList) {
+    if (diffList == null || diffList.size() == 0) {
+      return null;
+    }
+    Map<INode, INode> map = new HashMap<INode, INode>(diffList.size());
+    for (INode node : diffList) {
+      map.put(node, node);
+    }
+    return map;
+  }
+
+  /**
+   * Destroy a subtree under a DstReference node.
+   */
+  public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
+      final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) throws QuotaExceededException {
+    Preconditions.checkArgument(prior != null);
+    if (inode.isReference()) {
+      if (inode instanceof INodeReference.WithName && snapshot != null) {
+        // this inode has been renamed before the deletion of the DstReference
+        // subtree
+        inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
+            true);
+      } else {
+        // for DstReference node, continue this process to its subtree
+        destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
+            prior, collectedBlocks, removedINodes);
+      }
+    } else if (inode.isFile()) {
+      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
+    } else if (inode.isDirectory()) {
+      Map<INode, INode> excludedNodes = null;
+      INodeDirectory dir = inode.asDirectory();
+      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
+      if (sf != null) {
+        DirectoryDiffList diffList = sf.getDiffs();
+        DirectoryDiff priorDiff = diffList.getDiff(prior);
+        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+          excludedNodes = cloneDiffList(dList);
+        }
+
+        if (snapshot != null) {
+          diffList.deleteSnapshotDiff(snapshot, prior, dir, collectedBlocks,
+              removedINodes, true);
+        }
+        priorDiff = diffList.getDiff(prior);
+        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+          priorDiff.diff.destroyCreatedList(dir, collectedBlocks,
+              removedINodes);
+        }
+      }
+      for (INode child : inode.asDirectory().getChildrenList(prior)) {
+        if (excludedNodes != null && excludedNodes.containsKey(child)) {
+          continue;
+        }
+        destroyDstSubtree(child, snapshot, prior, collectedBlocks,
+            removedINodes);
+      }
+    }
+  }
+
+  /**
+   * Clean an inode while we move it from the deleted list of post to the
+   * deleted list of prior.
+   * @param inode The inode to clean.
+   * @param post The post snapshot.
+   * @param prior The prior snapshot.
+   * @param collectedBlocks Used to collect blocks for later deletion.
+   * @return Quota usage update.
+   */
+  private static Quota.Counts cleanDeletedINode(INode inode,
+      final Snapshot post, final Snapshot prior,
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
+    Quota.Counts counts = Quota.Counts.newInstance();
+    Deque<INode> queue = new ArrayDeque<INode>();
+    queue.addLast(inode);
+    while (!queue.isEmpty()) {
+      INode topNode = queue.pollFirst();
+      if (topNode instanceof INodeReference.WithName) {
+        INodeReference.WithName wn = (INodeReference.WithName) topNode;
+        if (wn.getLastSnapshotId() >= post.getId()) {
+          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
+              countDiffChange);
+        }
+        // For DstReference node, since the node is not in the created list of
+        // prior, we should treat it as regular file/dir
+      } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
+        INodeFile file = topNode.asFile();
+        counts.add(file.getDiffs().deleteSnapshotDiff(post, prior, file,
+            collectedBlocks, removedINodes, countDiffChange));
+      } else if (topNode.isDirectory()) {
+        INodeDirectory dir = topNode.asDirectory();
+        ChildrenDiff priorChildrenDiff = null;
+        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
+        if (sf != null) {
+          // delete files/dirs created after prior. Note that these
+          // files/dirs, along with inode, were deleted right after post.
+          DirectoryDiff priorDiff = sf.getDiffs().getDiff(prior);
+          if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+            priorChildrenDiff = priorDiff.getChildrenDiff();
+            counts.add(priorChildrenDiff.destroyCreatedList(dir,
+                collectedBlocks, removedINodes));
+          }
+        }
+
+        for (INode child : dir.getChildrenList(prior)) {
+          if (priorChildrenDiff != null
+              && priorChildrenDiff.search(ListType.DELETED,
+                  child.getLocalNameBytes()) != null) {
+            continue;
+          }
+          queue.addLast(child);
+        }
+      }
+    }
+    return counts;
+  }
+
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
+  private final DirectoryDiffList diffs;
+
+  public DirectoryWithSnapshotFeature(DirectoryDiffList diffs) {
+    this.diffs = diffs != null ? diffs : new DirectoryDiffList();
+  }
+
+  /** @return the last snapshot. */
+  public Snapshot getLastSnapshot() {
+    return diffs.getLastSnapshot();
+  }
+
+  /** @return the snapshot diff list. */
+  public DirectoryDiffList getDiffs() {
+    return diffs;
+  }
+
+  /**
+   * Get all the directories that are stored in some snapshot but not in the
+   * current children list. These directories are equivalent to the directories
+   * stored in the deletes lists.
+   */
+  public void getSnapshotDirectory(List<INodeDirectory> snapshotDir) {
+    for (DirectoryDiff sdiff : diffs) {
+      sdiff.getChildrenDiff().getDirsInDeleted(snapshotDir);
+    }
+  }
+
+  /**
+   * Add an inode into parent's children list. The caller of this method needs
+   * to make sure that parent is in the given snapshot "latest".
+   */
+  public boolean addChild(INodeDirectory parent, INode inode,
+      boolean setModTime, Snapshot latest) throws QuotaExceededException {
+    ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff;
+    int undoInfo = diff.create(inode);
+
+    final boolean added = parent.addChild(inode, setModTime, null);
+    if (!added) {
+      diff.undoCreate(inode, undoInfo);
+    }
+    return added;
+  }
+
+  /**
+   * Remove an inode from parent's children list. The caller of this method
+   * needs to make sure that parent is in the given snapshot "latest".
+   */
+  public boolean removeChild(INodeDirectory parent, INode child,
+      Snapshot latest) throws QuotaExceededException {
+    // For a directory that is not a renamed node, if isInLatestSnapshot returns
+    // false, the directory is not in the latest snapshot, thus we do not need
+    // to record the removed child in any snapshot.
+    // For a directory that was moved/renamed, note that if the directory is in
+    // any of the previous snapshots, we will create a reference node for the
+    // directory while rename, and isInLatestSnapshot will return true in that
+    // scenario (if all previous snapshots have been deleted, isInLatestSnapshot
+    // still returns false). Thus if isInLatestSnapshot returns false, the
+    // directory node cannot be in any snapshot (not in current tree, nor in
+    // previous src tree). Thus we do not need to record the removed child in
+    // any snapshot.
+    ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff;
+    UndoInfo<INode> undoInfo = diff.delete(child);
+
+    final boolean removed = parent.removeChild(child);
+    if (!removed && undoInfo != null) {
+      // remove failed, undo
+      diff.undoDelete(child, undoInfo);
+    }
+    return removed;
+  }
+
|
/**
|
||||||
|
* @return If there is no corresponding directory diff for the given
|
||||||
|
* snapshot, this means that the current children list should be
|
||||||
|
* returned for the snapshot. Otherwise we calculate the children list
|
||||||
|
* for the snapshot and return it.
|
||||||
|
*/
|
||||||
|
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
|
||||||
|
final Snapshot snapshot) {
|
||||||
|
final DirectoryDiff diff = diffs.getDiff(snapshot);
|
||||||
|
return diff != null ? diff.getChildrenList(currentINode) : currentINode
|
||||||
|
.getChildrenList(null);
|
||||||
|
}
|
||||||
|
|
||||||
|
public INode getChild(INodeDirectory currentINode, byte[] name,
|
||||||
|
Snapshot snapshot) {
|
||||||
|
final DirectoryDiff diff = diffs.getDiff(snapshot);
|
||||||
|
return diff != null ? diff.getChild(name, true, currentINode)
|
||||||
|
: currentINode.getChild(name, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Used to record the modification of a symlink node */
|
||||||
|
public INode saveChild2Snapshot(INodeDirectory currentINode,
|
||||||
|
final INode child, final Snapshot latest, final INode snapshotCopy)
|
||||||
|
throws QuotaExceededException {
|
||||||
|
Preconditions.checkArgument(!child.isDirectory(),
|
||||||
|
"child is a directory, child=%s", child);
|
||||||
|
Preconditions.checkArgument(latest != null);
|
||||||
|
|
||||||
|
final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest,
|
||||||
|
currentINode);
|
||||||
|
if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) {
|
||||||
|
// it was already saved in the latest snapshot earlier.
|
||||||
|
return child;
|
||||||
|
}
|
||||||
|
|
||||||
|
diff.diff.modify(snapshotCopy, child);
|
||||||
|
return child;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void clear(INodeDirectory currentINode,
|
||||||
|
final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
|
||||||
|
// destroy its diff list
|
||||||
|
for (DirectoryDiff diff : diffs) {
|
||||||
|
diff.destroyDiffAndCollectBlocks(currentINode, collectedBlocks,
|
||||||
|
removedINodes);
|
||||||
|
}
|
||||||
|
diffs.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
|
||||||
|
for(DirectoryDiff d : diffs) {
|
||||||
|
for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
|
||||||
|
deleted.computeQuotaUsage(counts, false, Snapshot.INVALID_ID);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
counts.add(Quota.NAMESPACE, diffs.asList().size());
|
||||||
|
return counts;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void computeContentSummary4Snapshot(final Content.Counts counts) {
|
||||||
|
// Create a new blank summary context for blocking processing of subtree.
|
||||||
|
ContentSummaryComputationContext summary =
|
||||||
|
new ContentSummaryComputationContext();
|
||||||
|
for(DirectoryDiff d : diffs) {
|
||||||
|
for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
|
||||||
|
deleted.computeContentSummary(summary);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Add the counts from deleted trees.
|
||||||
|
counts.add(summary.getCounts());
|
||||||
|
// Add the deleted directory count.
|
||||||
|
counts.add(Content.DIRECTORY, diffs.asList().size());
|
||||||
|
}
|
||||||
|
|
||||||
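The addChild/removeChild pattern above -- record the change in the latest snapshot's ChildrenDiff first, apply it to the children list, and undo the bookkeeping if the apply fails -- is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea; all class and method names here are hypothetical simplifications, not the HDFS types.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for the created/deleted diff idea used above.
class ChildrenDiffSketch {
  private final List<String> created = new ArrayList<String>(); // added after the snapshot
  private final List<String> deleted = new ArrayList<String>(); // removed after the snapshot

  // Record a creation; the returned index lets a failed insert be undone.
  int create(String child) {
    created.add(child);
    return created.size() - 1;
  }

  // Roll the bookkeeping back when the actual insert into the children
  // list failed (e.g. because a quota check rejected it).
  void undoCreate(int undoInfo) {
    created.remove(undoInfo);
  }

  // Reconstruct the children as of the snapshot: start from the current
  // list, drop entries created after it, and re-add the deleted ones.
  List<String> childrenAtSnapshot(List<String> current) {
    List<String> result = new ArrayList<String>(current);
    result.removeAll(created);
    result.addAll(deleted);
    return result;
  }
}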
   /**
    * Compute the difference between Snapshots.
    *
@@ -421,10 +688,11 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
    * @param diff Used to capture the changes happening to the children. Note
    *     that the diff still represents (later_snapshot - earlier_snapshot)
    *     although toSnapshot can be before fromSnapshot.
+   * @param currentINode The {@link INodeDirectory} this feature belongs to.
    * @return Whether changes happened between the startSnapshot and endSnapshot.
    */
   boolean computeDiffBetweenSnapshots(Snapshot fromSnapshot,
-      Snapshot toSnapshot, ChildrenDiff diff) {
+      Snapshot toSnapshot, ChildrenDiff diff, INodeDirectory currentINode) {
     Snapshot earlier = fromSnapshot;
     Snapshot later = toSnapshot;
     if (Snapshot.ID_COMPARATOR.compare(fromSnapshot, toSnapshot) > 0) {
@@ -432,8 +700,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
       later = fromSnapshot;
     }

-    boolean modified = diffs.changedBetweenSnapshots(earlier,
-        later);
+    boolean modified = diffs.changedBetweenSnapshots(earlier, later);
     if (!modified) {
       return false;
     }
@@ -470,233 +737,14 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
           return true;
         }
       }
-      return !dirCopy.metadataEquals(this);
+      return !dirCopy.metadataEquals(currentINode);
     } else {
       return false;
     }
   }
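Note that computeDiffBetweenSnapshots accepts its two snapshot arguments in either order: it normalizes them to (earlier, later) by snapshot ID, so the computed ChildrenDiff always represents later_snapshot - earlier_snapshot. A tiny illustrative sketch of that normalization, with plain ints standing in for snapshot IDs (not the HDFS Snapshot type):

// Illustrative only: ints stand in for snapshot IDs ordered by creation time.
final class SnapshotOrdering {
  static int[] normalize(int fromId, int toId) {
    int earlier = fromId;
    int later = toId;
    if (fromId > toId) { // "from" was taken after "to"; swap them
      earlier = toId;
      later = fromId;
    }
    return new int[] { earlier, later };
  }

  public static void main(String[] args) {
    // Both argument orders yield the same (earlier, later) pair.
    System.out.println(java.util.Arrays.toString(normalize(3, 7))); // [3, 7]
    System.out.println(java.util.Arrays.toString(normalize(7, 3))); // [3, 7]
  }
}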
-  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
-  private final DirectoryDiffList diffs;
-
-  public INodeDirectoryWithSnapshot(INodeDirectory that) {
-    this(that, true, that instanceof INodeDirectoryWithSnapshot?
-        ((INodeDirectoryWithSnapshot)that).getDiffs(): null);
-  }
-
-  INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
-      DirectoryDiffList diffs) {
-    super(that, adopt, true);
-    this.diffs = diffs != null? diffs: new DirectoryDiffList();
-  }
-
-  /** @return the last snapshot. */
-  public Snapshot getLastSnapshot() {
-    return diffs.getLastSnapshot();
-  }
-
-  /** @return the snapshot diff list. */
-  public DirectoryDiffList getDiffs() {
-    return diffs;
-  }
-
-  @Override
-  public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) {
-    return diffs.getSnapshotINode(snapshot, this);
-  }
-
-  @Override
-  public INodeDirectoryWithSnapshot recordModification(final Snapshot latest,
-      final INodeMap inodeMap) throws QuotaExceededException {
-    if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
-      return saveSelf2Snapshot(latest, null);
-    }
-    return this;
-  }
-
-  /** Save the snapshot copy to the latest snapshot. */
-  public INodeDirectoryWithSnapshot saveSelf2Snapshot(
-      final Snapshot latest, final INodeDirectory snapshotCopy)
-      throws QuotaExceededException {
-    diffs.saveSelf2Snapshot(latest, this, snapshotCopy);
-    return this;
-  }
-
-  @Override
-  public INode saveChild2Snapshot(final INode child, final Snapshot latest,
-      final INode snapshotCopy, final INodeMap inodeMap)
-      throws QuotaExceededException {
-    Preconditions.checkArgument(!child.isDirectory(),
-        "child is a directory, child=%s", child);
-    if (latest == null) {
-      return child;
-    }
-
-    final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, this);
-    if (diff.getChild(child.getLocalNameBytes(), false, this) != null) {
-      // it was already saved in the latest snapshot earlier.
-      return child;
-    }
-
-    diff.diff.modify(snapshotCopy, child);
-    return child;
-  }
-
-  @Override
-  public boolean addChild(INode inode, boolean setModTime, Snapshot latest,
-      final INodeMap inodeMap) throws QuotaExceededException {
-    ChildrenDiff diff = null;
-    Integer undoInfo = null;
-    if (isInLatestSnapshot(latest)) {
-      diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
-      undoInfo = diff.create(inode);
-    }
-    final boolean added = super.addChild(inode, setModTime, null, inodeMap);
-    if (!added && undoInfo != null) {
-      diff.undoCreate(inode, undoInfo);
-    }
-    return added;
-  }
-
-  @Override
-  public boolean removeChild(INode child, Snapshot latest,
-      final INodeMap inodeMap) throws QuotaExceededException {
-    ChildrenDiff diff = null;
-    UndoInfo<INode> undoInfo = null;
-    // For a directory that is not a renamed node, if isInLatestSnapshot returns
-    // false, the directory is not in the latest snapshot, thus we do not need
-    // to record the removed child in any snapshot.
-    // For a directory that was moved/renamed, note that if the directory is in
-    // any of the previous snapshots, we will create a reference node for the
-    // directory while rename, and isInLatestSnapshot will return true in that
-    // scenario (if all previous snapshots have been deleted, isInLatestSnapshot
-    // still returns false). Thus if isInLatestSnapshot returns false, the
-    // directory node cannot be in any snapshot (not in current tree, nor in
-    // previous src tree). Thus we do not need to record the removed child in
-    // any snapshot.
-    if (isInLatestSnapshot(latest)) {
-      diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
-      undoInfo = diff.delete(child);
-    }
-    final boolean removed = removeChild(child);
-    if (undoInfo != null) {
-      if (!removed) {
-        //remove failed, undo
-        diff.undoDelete(child, undoInfo);
-      }
-    }
-    return removed;
-  }
-
-  @Override
-  public void replaceChild(final INode oldChild, final INode newChild,
-      final INodeMap inodeMap) {
-    super.replaceChild(oldChild, newChild, inodeMap);
-    if (oldChild.getParentReference() != null && !newChild.isReference()) {
-      // oldChild is referred by a Reference node. Thus we are replacing the
-      // referred inode, e.g.,
-      // INodeFileWithSnapshot -> INodeFileUnderConstructionWithSnapshot
-      // in this case, we do not need to update the diff list
-      return;
-    } else {
-      diffs.replaceChild(ListType.CREATED, oldChild, newChild);
-    }
-  }
-
-  /**
-   * This method is usually called by the undo section of rename.
-   *
-   * Before calling this function, in the rename operation, we replace the
-   * original src node (of the rename operation) with a reference node (WithName
-   * instance) in both the children list and a created list, delete the
-   * reference node from the children list, and add it to the corresponding
-   * deleted list.
-   *
-   * To undo the above operations, we have the following steps in particular:
-   *
-   * <pre>
-   * 1) remove the WithName node from the deleted list (if it exists)
-   * 2) replace the WithName node in the created list with srcChild
-   * 3) add srcChild back as a child of srcParent. Note that we already add
-   * the node into the created list of a snapshot diff in step 2, we do not need
-   * to add srcChild to the created list of the latest snapshot.
-   * </pre>
-   *
-   * We do not need to update quota usage because the old child is in the
-   * deleted list before.
-   *
-   * @param oldChild
-   *          The reference node to be removed/replaced
-   * @param newChild
-   *          The node to be added back
-   * @param latestSnapshot
-   *          The latest snapshot. Note this may not be the last snapshot in the
-   *          {@link #diffs}, since the src tree of the current rename operation
-   *          may be the dst tree of a previous rename.
-   * @throws QuotaExceededException should not throw this exception
-   */
-  public void undoRename4ScrParent(final INodeReference oldChild,
-      final INode newChild, Snapshot latestSnapshot)
-      throws QuotaExceededException {
-    diffs.removeChild(ListType.DELETED, oldChild);
-    diffs.replaceChild(ListType.CREATED, oldChild, newChild);
-    // pass null for inodeMap since the parent node will not get replaced when
-    // undoing rename
-    addChild(newChild, true, null, null);
-  }
-
-  /**
-   * Undo the rename operation for the dst tree, i.e., if the rename operation
-   * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
-   * and delete possible record in the deleted list.
-   */
-  public void undoRename4DstParent(final INode deletedChild,
-      Snapshot latestSnapshot) throws QuotaExceededException {
-    boolean removeDeletedChild = diffs.removeChild(ListType.DELETED,
-        deletedChild);
-    // pass null for inodeMap since the parent node will not get replaced when
-    // undoing rename
-    final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
-        : latestSnapshot, null);
-    // update quota usage if adding is successfully and the old child has not
-    // been stored in deleted list before
-    if (added && !removeDeletedChild) {
-      final Quota.Counts counts = deletedChild.computeQuotaUsage();
-      addSpaceConsumed(counts.get(Quota.NAMESPACE),
-          counts.get(Quota.DISKSPACE), false);
-    }
-  }
-
-  @Override
-  public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) {
-    final DirectoryDiff diff = diffs.getDiff(snapshot);
-    return diff != null? diff.getChildrenList(this): super.getChildrenList(null);
-  }
-
-  @Override
-  public INode getChild(byte[] name, Snapshot snapshot) {
-    final DirectoryDiff diff = diffs.getDiff(snapshot);
-    return diff != null? diff.getChild(name, true, this): super.getChild(name, null);
-  }
-
-  @Override
-  public String toDetailString() {
-    return super.toDetailString() + ", " + diffs;
-  }
-
-  /**
-   * Get all the directories that are stored in some snapshot but not in the
-   * current children list. These directories are equivalent to the directories
-   * stored in the deletes lists.
-   */
-  public void getSnapshotDirectory(List<INodeDirectory> snapshotDir) {
-    for (DirectoryDiff sdiff : diffs) {
-      sdiff.getChildrenDiff().getDirsInDeleted(snapshotDir);
-    }
-  }
-
-  @Override
-  public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+  public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
+      final Snapshot snapshot, Snapshot prior,
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
@@ -704,12 +752,12 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
     Map<INode, INode> priorCreated = null;
     Map<INode, INode> priorDeleted = null;
     if (snapshot == null) { // delete the current directory
-      recordModification(prior, null);
+      currentINode.recordModification(prior);
       // delete everything in created list
       DirectoryDiff lastDiff = diffs.getLast();
       if (lastDiff != null) {
-        counts.add(lastDiff.diff.destroyCreatedList(this, collectedBlocks,
-            removedINodes));
+        counts.add(lastDiff.diff.destroyCreatedList(currentINode,
+            collectedBlocks, removedINodes));
       }
     } else {
       // update prior
@@ -726,7 +774,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
         }
       }

-      counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, this,
+      counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, currentINode,
          collectedBlocks, removedINodes, countDiffChange));

      // check priorDiff again since it may be created during the diff deletion
@@ -767,202 +815,13 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
        }
      }
    }
-    counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks,
-        removedINodes, priorDeleted, countDiffChange));
+    counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+        collectedBlocks, removedINodes, priorDeleted, countDiffChange));

-    if (isQuotaSet()) {
-      getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
+    if (currentINode.isQuotaSet()) {
+      currentINode.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
          -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
    }
    return counts;
  }
-
-  /**
-   * Clean an inode while we move it from the deleted list of post to the
-   * deleted list of prior.
-   * @param inode The inode to clean.
-   * @param post The post snapshot.
-   * @param prior The prior snapshot.
-   * @param collectedBlocks Used to collect blocks for later deletion.
-   * @return Quota usage update.
-   */
-  private static Quota.Counts cleanDeletedINode(INode inode,
-      final Snapshot post, final Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes, final boolean countDiffChange)
-      throws QuotaExceededException {
-    Quota.Counts counts = Quota.Counts.newInstance();
-    Deque<INode> queue = new ArrayDeque<INode>();
-    queue.addLast(inode);
-    while (!queue.isEmpty()) {
-      INode topNode = queue.pollFirst();
-      if (topNode instanceof INodeReference.WithName) {
-        INodeReference.WithName wn = (INodeReference.WithName) topNode;
-        if (wn.getLastSnapshotId() >= post.getId()) {
-          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
-              countDiffChange);
-        }
-        // For DstReference node, since the node is not in the created list of
-        // prior, we should treat it as regular file/dir
-      } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
-        INodeFile file = topNode.asFile();
-        counts.add(file.getDiffs().deleteSnapshotDiff(post, prior, file,
-            collectedBlocks, removedINodes, countDiffChange));
-      } else if (topNode.isDirectory()) {
-        INodeDirectory dir = topNode.asDirectory();
-        ChildrenDiff priorChildrenDiff = null;
-        if (dir instanceof INodeDirectoryWithSnapshot) {
-          // delete files/dirs created after prior. Note that these
-          // files/dirs, along with inode, were deleted right after post.
-          INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
-          DirectoryDiff priorDiff = sdir.getDiffs().getDiff(prior);
-          if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
-            priorChildrenDiff = priorDiff.getChildrenDiff();
-            counts.add(priorChildrenDiff.destroyCreatedList(sdir,
-                collectedBlocks, removedINodes));
-          }
-        }
-
-        for (INode child : dir.getChildrenList(prior)) {
-          if (priorChildrenDiff != null
-              && priorChildrenDiff.search(ListType.DELETED,
-                  child.getLocalNameBytes()) != null) {
-            continue;
-          }
-          queue.addLast(child);
-        }
-      }
-    }
-    return counts;
-  }
-
-  @Override
-  public void destroyAndCollectBlocks(
-      final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) {
-    // destroy its diff list
-    for (DirectoryDiff diff : diffs) {
-      diff.destroyDiffAndCollectBlocks(this, collectedBlocks, removedINodes);
-    }
-    diffs.clear();
-    super.destroyAndCollectBlocks(collectedBlocks, removedINodes);
-  }
-
-  @Override
-  public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
-      boolean useCache, int lastSnapshotId) {
-    if ((useCache && isQuotaSet()) || lastSnapshotId == Snapshot.INVALID_ID) {
-      return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
-    }
-
-    Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);
-
-    ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
-    for (INode child : childrenList) {
-      child.computeQuotaUsage(counts, useCache, lastSnapshotId);
-    }
-
-    counts.add(Quota.NAMESPACE, 1);
-    return counts;
-  }
-
-  @Override
-  public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
-    super.computeQuotaUsage4CurrentDirectory(counts);
-    for(DirectoryDiff d : diffs) {
-      for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
-        deleted.computeQuotaUsage(counts, false, Snapshot.INVALID_ID);
-      }
-    }
-    counts.add(Quota.NAMESPACE, diffs.asList().size());
-    return counts;
-  }
-
-  @Override
-  public ContentSummaryComputationContext computeContentSummary(
-      final ContentSummaryComputationContext summary) {
-    // Snapshot summary calc won't be relinquishing locks in the middle.
-    // Do this first and handover to parent.
-    computeContentSummary4Snapshot(summary.getCounts());
-    super.computeContentSummary(summary);
-    return summary;
-  }
-
-  private void computeContentSummary4Snapshot(final Content.Counts counts) {
-    // Create a new blank summary context for blocking processing of subtree.
-    ContentSummaryComputationContext summary =
-        new ContentSummaryComputationContext();
-    for(DirectoryDiff d : diffs) {
-      for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
-        deleted.computeContentSummary(summary);
-      }
-    }
-    // Add the counts from deleted trees.
-    counts.add(summary.getCounts());
-    // Add the deleted directory count.
-    counts.add(Content.DIRECTORY, diffs.asList().size());
-  }
-
-  private static Map<INode, INode> cloneDiffList(List<INode> diffList) {
-    if (diffList == null || diffList.size() == 0) {
-      return null;
-    }
-    Map<INode, INode> map = new HashMap<INode, INode>(diffList.size());
-    for (INode node : diffList) {
-      map.put(node, node);
-    }
-    return map;
-  }
-
-  /**
-   * Destroy a subtree under a DstReference node.
-   */
-  public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
-      final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
-    Preconditions.checkArgument(prior != null);
-    if (inode.isReference()) {
-      if (inode instanceof INodeReference.WithName && snapshot != null) {
-        // this inode has been renamed before the deletion of the DstReference
-        // subtree
-        inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
-            true);
-      } else {
-        // for DstReference node, continue this process to its subtree
-        destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
-            prior, collectedBlocks, removedINodes);
-      }
-    } else if (inode.isFile()) {
-      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
-    } else if (inode.isDirectory()) {
-      Map<INode, INode> excludedNodes = null;
-      if (inode instanceof INodeDirectoryWithSnapshot) {
-        INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode;
-
-        DirectoryDiffList diffList = sdir.getDiffs();
-        DirectoryDiff priorDiff = diffList.getDiff(prior);
-        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
-          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
-          excludedNodes = cloneDiffList(dList);
-        }
-
-        if (snapshot != null) {
-          diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
-              removedINodes, true);
-        }
-        priorDiff = diffList.getDiff(prior);
-        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
-          priorDiff.diff.destroyCreatedList(sdir, collectedBlocks,
-              removedINodes);
-        }
-      }
-      for (INode child : inode.asDirectory().getChildrenList(prior)) {
-        if (excludedNodes != null && excludedNodes.containsKey(child)) {
-          continue;
-        }
-        destroyDstSubtree(child, snapshot, prior, collectedBlocks,
-            removedINodes);
-      }
-    }
-  }
 }
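Taken together, the hunks above carry out HDFS-5632's central move: the INodeDirectoryWithSnapshot subclass disappears, and its only real state, the directory diff list, becomes a feature attached to a plain INodeDirectory. The shape of that refactoring, reduced to a self-contained sketch with hypothetical names (not the HDFS classes):

import java.util.ArrayList;
import java.util.List;

// Sketch of the subclass-to-feature refactoring: instead of
// "class DirWithSnapshot extends Dir", a plain Dir carries an optional
// feature object.
class Dir {
  interface Feature {}

  static class SnapshotFeature implements Feature {
    final List<String> diffs = new ArrayList<String>();
  }

  private final List<Feature> features = new ArrayList<Feature>();

  void addSnapshotFeature() {
    features.add(new SnapshotFeature());
  }

  // Callers replace "dir instanceof DirWithSnapshot" casts with a
  // null-checked feature lookup, the same pattern the new code uses.
  SnapshotFeature getSnapshotFeature() {
    for (Feature f : features) {
      if (f instanceof SnapshotFeature) {
        return (SnapshotFeature) f;
      }
    }
    return null;
  }

  boolean isWithSnapshot() {
    return getSnapshotFeature() != null;
  }
}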
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.Quota;

 /**
@@ -57,10 +56,6 @@ public class FileWithSnapshotFeature implements INode.Feature {
     isCurrentFileDeleted = true;
   }

-  public INodeFileAttributes getSnapshotINode(INodeFile f, Snapshot snapshot) {
-    return diffs.getSnapshotINode(snapshot, f);
-  }
-
   public FileDiffList getDiffs() {
     return diffs;
   }
@@ -90,7 +85,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
     if (snapshot == null) {
       // delete the current file while the file has snapshot feature
       if (!isCurrentFileDeleted()) {
-        file.recordModification(prior, null);
+        file.recordModification(prior);
         deleteCurrentFile();
       }
       collectBlocksAndClear(file, collectedBlocks, removedINodes);
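The hunk above shows the file-side analogue of the same feature design: deleting the current file while snapshots still exist only marks it deleted, and blocks are reclaimed once nothing references them. A rough, hypothetical sketch of that rule (a simplification, not the HDFS implementation):

import java.util.ArrayList;
import java.util.List;

// Hypothetical simplification of the "mark deleted, reclaim later" rule.
class SnapshottedFileSketch {
  private boolean currentFileDeleted = false;
  private final int snapshotDiffCount; // diffs still referencing the file
  private final List<String> blocks = new ArrayList<String>();

  SnapshottedFileSketch(int snapshotDiffCount, List<String> blocks) {
    this.snapshotDiffCount = snapshotDiffCount;
    this.blocks.addAll(blocks);
  }

  // Returns the blocks that may be physically removed right away.
  List<String> deleteCurrentFile() {
    currentFileDeleted = true;
    List<String> collected = new ArrayList<String>();
    if (snapshotDiffCount == 0) {
      collected.addAll(blocks); // no snapshot refers to the blocks anymore
      blocks.clear();
    }
    return collected;
  }
}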
@@ -44,6 +44,8 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.Time;
@@ -58,7 +60,7 @@ import com.google.common.primitives.SignedBytes;
  * by the namesystem and FSDirectory locks.
  */
 @InterfaceAudience.Private
-public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
+public class INodeDirectorySnapshottable extends INodeDirectory {
   /** Limit the number of snapshot per snapshottable directory. */
   static final int SNAPSHOT_LIMIT = 1 << 16;

@@ -115,8 +117,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
      * the two snapshots, while its associated value is a {@link ChildrenDiff}
      * storing the changes (creation/deletion) happened to the children (files).
      */
-    private final Map<INodeDirectoryWithSnapshot, ChildrenDiff> dirDiffMap =
-        new HashMap<INodeDirectoryWithSnapshot, ChildrenDiff>();
+    private final Map<INodeDirectory, ChildrenDiff> dirDiffMap =
+        new HashMap<INodeDirectory, ChildrenDiff>();

     SnapshotDiffInfo(INodeDirectorySnapshottable snapshotRoot, Snapshot start,
         Snapshot end) {
@@ -126,8 +128,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
     }

     /** Add a dir-diff pair */
-    private void addDirDiff(INodeDirectoryWithSnapshot dir,
-        byte[][] relativePath, ChildrenDiff diff) {
+    private void addDirDiff(INodeDirectory dir, byte[][] relativePath,
+        ChildrenDiff diff) {
       dirDiffMap.put(dir, diff);
       diffMap.put(dir, relativePath);
     }
@@ -154,8 +156,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
         if (node.isDirectory()) {
           ChildrenDiff dirDiff = dirDiffMap.get(node);
           List<DiffReportEntry> subList = dirDiff.generateReport(
-              diffMap.get(node), (INodeDirectoryWithSnapshot) node,
-              isFromEarlier());
+              diffMap.get(node), isFromEarlier());
           diffReportList.addAll(subList);
         }
       }
@@ -183,8 +184,11 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
   private int snapshotQuota = SNAPSHOT_LIMIT;

   public INodeDirectorySnapshottable(INodeDirectory dir) {
-    super(dir, true, dir instanceof INodeDirectoryWithSnapshot ?
-        ((INodeDirectoryWithSnapshot) dir).getDiffs(): null);
+    super(dir, true, true);
+    // add snapshot feature if the original directory does not have it
+    if (!isWithSnapshot()) {
+      addSnapshotFeature(null);
+    }
   }

   /** @return the number of existing snapshots. */
@@ -298,8 +302,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
     snapshotsByNames.add(-i - 1, s);

     //set modification time
-    updateModificationTime(Time.now(), null, null);
-    s.getRoot().setModificationTime(getModificationTime(), null, null);
+    updateModificationTime(Time.now(), null);
+    s.getRoot().setModificationTime(getModificationTime(), null);
     return s;
   }

@@ -413,12 +417,12 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
       byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
       if (node.isDirectory()) {
         INodeDirectory dir = node.asDirectory();
-        if (dir instanceof INodeDirectoryWithSnapshot) {
-          INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
-          boolean change = sdir.computeDiffBetweenSnapshots(
-              diffReport.from, diffReport.to, diff);
+        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
+        if (sf != null) {
+          boolean change = sf.computeDiffBetweenSnapshots(diffReport.from,
+              diffReport.to, diff, dir);
           if (change) {
-            diffReport.addDirDiff(sdir, relativePath, diff);
+            diffReport.addDirDiff(dir, relativePath, diff);
           }
         }
         ReadOnlyList<INode> children = dir.getChildrenList(diffReport
@@ -453,13 +457,15 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
   INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
       throws QuotaExceededException {
     if (latest == null) {
-      Preconditions.checkState(getLastSnapshot() == null,
+      Preconditions.checkState(
+          getDirectoryWithSnapshotFeature().getLastSnapshot() == null,
           "latest == null but getLastSnapshot() != null, this=%s", this);
-      return replaceSelf4INodeDirectory(inodeMap);
-    } else {
-      return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
-          .recordModification(latest, null);
     }
+    INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
+    if (latest != null) {
+      dir.recordModification(latest);
+    }
+    return dir;
   }

   @Override
@@ -117,9 +117,8 @@ public class Snapshot implements Comparable<byte[]> {
     for(; inode != null; inode = inode.getParent()) {
       if (inode.isDirectory()) {
         final INodeDirectory dir = inode.asDirectory();
-        if (dir instanceof INodeDirectoryWithSnapshot) {
-          latest = ((INodeDirectoryWithSnapshot) dir).getDiffs().updatePrior(
-              anchor, latest);
+        if (dir.isWithSnapshot()) {
+          latest = dir.getDiffs().updatePrior(anchor, latest);
         }
       }
     }
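The loop above walks ancestor directories and consults only those carrying the snapshot feature. Reduced to a self-contained sketch (a hypothetical Node type and plain integer IDs; the real updatePrior bookkeeping is more involved), the walk looks like this:

// Hypothetical simplification of the ancestor walk: climb parent links
// and, for any directory that has snapshot diffs, fold its newest diff's
// snapshot id into the running "latest" answer.
final class LatestSnapshotWalk {
  static final class Node {
    final Node parent;
    final Integer lastDiffSnapshotId; // null if no snapshot feature
    Node(Node parent, Integer lastDiffSnapshotId) {
      this.parent = parent;
      this.lastDiffSnapshotId = lastDiffSnapshotId;
    }
  }

  static Integer findLatest(Node start) {
    Integer latest = null;
    for (Node n = start; n != null; n = n.parent) {
      if (n.lastDiffSnapshotId != null
          && (latest == null || n.lastDiffSnapshotId > latest)) {
        latest = n.lastDiffSnapshotId;
      }
    }
    return latest;
  }
}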
@@ -36,8 +36,8 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -91,8 +91,7 @@ public class SnapshotFSImageFormat {
   public static void saveDirectoryDiffList(final INodeDirectory dir,
       final DataOutput out, final ReferenceMap referenceMap
       ) throws IOException {
-    saveINodeDiffs(dir instanceof INodeDirectoryWithSnapshot?
-        ((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out, referenceMap);
+    saveINodeDiffs(dir.getDiffs(), out, referenceMap);
   }

   public static void saveFileDiffList(final INodeFile file,
@@ -139,7 +138,7 @@ public class SnapshotFSImageFormat {
    * @return The created node.
    */
   private static INode loadCreated(byte[] createdNodeName,
-      INodeDirectoryWithSnapshot parent) throws IOException {
+      INodeDirectory parent) throws IOException {
     // the INode in the created list should be a reference to another INode
     // in posterior SnapshotDiffs or one of the current children
     for (DirectoryDiff postDiff : parent.getDiffs()) {
@@ -165,7 +164,7 @@ public class SnapshotFSImageFormat {
    * @param in The {@link DataInput} to read.
    * @return The created list.
    */
-  private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
+  private static List<INode> loadCreatedList(INodeDirectory parent,
       DataInput in) throws IOException {
     // read the size of the created list
     int createdSize = in.readInt();
@@ -188,7 +187,7 @@ public class SnapshotFSImageFormat {
    * @param loader The {@link Loader} instance.
    * @return The deleted list.
    */
-  private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
+  private static List<INode> loadDeletedList(INodeDirectory parent,
       List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
       throws IOException {
     int deletedSize = in.readInt();
@@ -239,11 +238,10 @@ public class SnapshotFSImageFormat {
   public static void loadDirectoryDiffList(INodeDirectory dir,
       DataInput in, FSImageFormat.Loader loader) throws IOException {
     final int size = in.readInt();
-    if (dir instanceof INodeDirectoryWithSnapshot) {
-      INodeDirectoryWithSnapshot withSnapshot = (INodeDirectoryWithSnapshot)dir;
-      DirectoryDiffList diffs = withSnapshot.getDiffs();
+    if (dir.isWithSnapshot()) {
+      DirectoryDiffList diffs = dir.getDiffs();
       for (int i = 0; i < size; i++) {
-        diffs.addFirst(loadDirectoryDiff(withSnapshot, in, loader));
+        diffs.addFirst(loadDirectoryDiff(dir, in, loader));
       }
     }
   }
@@ -277,9 +275,8 @@ public class SnapshotFSImageFormat {
    * using.
    * @return A {@link DirectoryDiff}.
    */
-  private static DirectoryDiff loadDirectoryDiff(
-      INodeDirectoryWithSnapshot parent, DataInput in,
-      FSImageFormat.Loader loader) throws IOException {
+  private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
+      DataInput in, FSImageFormat.Loader loader) throws IOException {
     // 1. Read the full path of the Snapshot root to identify the Snapshot
     final Snapshot snapshot = loader.getSnapshot(in);
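One detail worth noting in loadDirectoryDiffList: the loop reads diffs forward from the image but inserts each with addFirst, so the in-memory list ends up in the reverse of the on-disk order. The snippet below demonstrates just that reversal effect; it is a generic Deque demo, not HDFS code.

import java.util.ArrayDeque;
import java.util.Deque;

// Generic demo: addFirst in a forward read loop reverses the input order.
final class AddFirstDemo {
  public static void main(String[] args) {
    Deque<String> diffs = new ArrayDeque<String>();
    for (String onDisk : new String[] { "diff3", "diff2", "diff1" }) {
      diffs.addFirst(onDisk); // the most recently read element becomes the head
    }
    System.out.println(diffs); // [diff1, diff2, diff3]
  }
}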
@@ -2143,17 +2143,14 @@ public class MiniDFSCluster {
   }

   /**
-   * Get a storage directory for a datanode. There are two storage directories
-   * per datanode:
+   * Get a storage directory for a datanode.
    * <ol>
    * <li><base directory>/data/data<2*dnIndex + 1></li>
    * <li><base directory>/data/data<2*dnIndex + 2></li>
    * </ol>
    *
    * @param dnIndex datanode index (starts from 0)
-   * @param dirIndex directory index (0 or 1). Index 0 provides access to the
-   *          first storage directory. Index 1 provides access to the second
-   *          storage directory.
+   * @param dirIndex directory index.
    * @return Storage directory
    */
   public static File getStorageDir(int dnIndex, int dirIndex) {
@@ -2164,7 +2161,7 @@ public class MiniDFSCluster {
    * Calculate the DN instance-specific path for appending to the base dir
    * to determine the location of the storage of a DN instance in the mini cluster
    * @param dnIndex datanode index
-   * @param dirIndex directory index (0 or 1).
+   * @param dirIndex directory index.
    * @return
    */
   private static String getStorageDirPath(int dnIndex, int dirIndex) {
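The javadoc above still pins down the on-disk layout: datanode dnIndex owns the directories data<2*dnIndex + 1> and data<2*dnIndex + 2> under <base directory>/data. A small sketch of that formula follows; the helper name is illustrative, not the MiniDFSCluster API, and it assumes dirIndex counts from 0 as the javadoc's @param suggests.

import java.io.File;

// Illustrative helper computing the layout described in the javadoc above.
final class StorageDirLayout {
  static File storageDir(File base, int dnIndex, int dirIndex) {
    // dirIndex 0 -> data<2*dnIndex + 1>, dirIndex 1 -> data<2*dnIndex + 2>
    return new File(base, "data/data" + (2 * dnIndex + dirIndex + 1));
  }

  public static void main(String[] args) {
    File base = new File("/tmp/minidfs");
    System.out.println(storageDir(base, 0, 0)); // .../data/data1
    System.out.println(storageDir(base, 1, 1)); // .../data/data4
  }
}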
@@ -71,7 +71,15 @@ import org.mockito.invocation.InvocationOnMock;
 /**
  * This test simulates a variety of situations when blocks are being
  * intentionally corrupted, unexpectedly modified, and so on before a block
- * report is happening
+ * report is happening.
+ *
+ * For each test case it runs two variations:
+ *  #1 - For a given DN, the first variation sends block reports for all
+ *       storages in a single call to the NN.
+ *  #2 - For a given DN, the second variation sends block reports for each
+ *       storage in a separate call.
+ *
+ * The behavior should be the same in either variation.
  */
 public class TestBlockReport {
   public static final Log LOG = LogFactory.getLog(TestBlockReport.class);
@@ -157,6 +165,113 @@ public class TestBlockReport {
     return reports;
   }

+  /**
+   * Utility routine to send block reports to the NN, either in a single call
+   * or reporting one storage per call.
+   *
+   * @param dnR
+   * @param poolId
+   * @param reports
+   * @param needtoSplit
+   * @throws IOException
+   */
+  private void sendBlockReports(DatanodeRegistration dnR, String poolId,
+      StorageBlockReport[] reports, boolean needtoSplit) throws IOException {
+    if (!needtoSplit) {
+      LOG.info("Sending combined block reports for " + dnR);
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    } else {
+      for (StorageBlockReport report : reports) {
+        LOG.info("Sending block report for storage " +
+            report.getStorage().getStorageID());
+        StorageBlockReport[] singletonReport = { report };
+        cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport);
+      }
+    }
+  }
+
+  /**
+   * Test variations blockReport_01 through blockReport_09 with combined
+   * and split block reports.
+   */
+  @Test
+  public void blockReportCombined_01() throws IOException {
+    blockReport_01(false);
+  }
+
+  @Test
+  public void blockReportSplit_01() throws IOException {
+    blockReport_01(true);
+  }
+
+  @Test
+  public void blockReportCombined_02() throws IOException {
+    blockReport_02(false);
+  }
+
+  @Test
+  public void blockReportSplit_02() throws IOException {
+    blockReport_02(true);
+  }
+
+  @Test
+  public void blockReportCombined_03() throws IOException {
+    blockReport_03(false);
+  }
+
+  @Test
+  public void blockReportSplit_03() throws IOException {
+    blockReport_03(true);
+  }
+
+  @Test
+  public void blockReportCombined_04() throws IOException {
+    blockReport_04(false);
+  }
+
+  @Test
+  public void blockReportSplit_04() throws IOException {
+    blockReport_04(true);
+  }
+
+  @Test
+  public void blockReportCombined_06() throws Exception {
+    blockReport_06(false);
+  }
+
+  @Test
+  public void blockReportSplit_06() throws Exception {
+    blockReport_06(true);
+  }
+
+  @Test
+  public void blockReportCombined_07() throws Exception {
+    blockReport_07(false);
+  }
+
+  @Test
+  public void blockReportSplit_07() throws Exception {
+    blockReport_07(true);
+  }
+
+  @Test
+  public void blockReportCombined_08() throws Exception {
+    blockReport_08(false);
+  }
+
+  @Test
+  public void blockReportSplit_08() throws Exception {
+    blockReport_08(true);
+  }
+
+  @Test
+  public void blockReportCombined_09() throws Exception {
+    blockReport_09(false);
+  }
+
+  @Test
+  public void blockReportSplit_09() throws Exception {
+    blockReport_09(true);
+  }
+
 /**
  * Test write a file, verifies and closes it. Then the length of the blocks
  * are messed up and BlockReport is forced.
@@ -164,8 +279,7 @@ public class TestBlockReport {
   *
   * @throws java.io.IOException on an error
   */
-  @Test
-  public void blockReport_01() throws IOException {
+  private void blockReport_01(boolean splitBlockReports) throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");

@@ -198,7 +312,7 @@ public class TestBlockReport {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);

     List<LocatedBlock> blocksAfterReport =
       DFSTestUtil.getAllBlocks(fs.open(filePath));
@@ -224,8 +338,7 @@ public class TestBlockReport {
   *
   * @throws IOException in case of errors
   */
-  @Test
-  public void blockReport_02() throws IOException {
+  private void blockReport_02(boolean splitBlockReports) throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     LOG.info("Running test " + METHOD_NAME);

@@ -280,7 +393,7 @@ public class TestBlockReport {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn0, poolId, false, false);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);

     BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
         .getBlockManager());
@@ -301,8 +414,7 @@ public class TestBlockReport {
   *
   * @throws IOException in case of an error
   */
-  @Test
-  public void blockReport_03() throws IOException {
+  private void blockReport_03(boolean splitBlockReports) throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
@@ -312,11 +424,7 @@ public class TestBlockReport {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
-    DatanodeCommand dnCmd =
-        cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Got the command: " + dnCmd);
-    }
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);
     printStats();

     assertThat("Wrong number of corrupt blocks",
@@ -333,8 +441,7 @@ public class TestBlockReport {
   *
   * @throws IOException in case of an error
   */
-  @Test
-  public void blockReport_04() throws IOException {
+  private void blockReport_04(boolean splitBlockReports) throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     DFSTestUtil.createFile(fs, filePath,
@@ -352,11 +459,7 @@ public class TestBlockReport {

     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
-    DatanodeCommand dnCmd =
-        cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Got the command: " + dnCmd);
-    }
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);
     printStats();

     assertThat("Wrong number of corrupt blocks",
@@ -373,8 +476,7 @@ public class TestBlockReport {
   *
   * @throws IOException in case of an error
   */
-  @Test
-  public void blockReport_06() throws Exception {
+  private void blockReport_06(boolean splitBlockReports) throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -387,7 +489,7 @@ public class TestBlockReport {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);
     printStats();
     assertEquals("Wrong number of PendingReplication Blocks",
         0, cluster.getNamesystem().getUnderReplicatedBlocks());
@@ -406,8 +508,7 @@ public class TestBlockReport {
   *
   * @throws IOException in case of an error
   */
-  @Test
-  public void blockReport_07() throws Exception {
+  private void blockReport_07(boolean splitBlockReports) throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -421,7 +522,7 @@ public class TestBlockReport {
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);
     printStats();

     assertThat("Wrong number of corrupt blocks",
@@ -432,7 +533,7 @@ public class TestBlockReport {
         cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));

     reports = getBlockReports(dn, poolId, true, true);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    sendBlockReports(dnR, poolId, reports, splitBlockReports);
     printStats();

     assertThat("Wrong number of corrupt blocks",
@@ -458,8 +559,7 @@ public class TestBlockReport {
   *
   * @throws IOException in case of an error
   */
-  @Test
-  public void blockReport_08() throws IOException {
+  private void blockReport_08(boolean splitBlockReports) throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
|
||||||
final int DN_N1 = DN_N0 + 1;
|
final int DN_N1 = DN_N0 + 1;
|
||||||
|
@ -483,8 +583,8 @@ public class TestBlockReport {
|
||||||
DataNode dn = cluster.getDataNodes().get(DN_N1);
|
DataNode dn = cluster.getDataNodes().get(DN_N1);
|
||||||
String poolId = cluster.getNamesystem().getBlockPoolId();
|
String poolId = cluster.getNamesystem().getBlockPoolId();
|
||||||
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
|
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
|
||||||
StorageBlockReport[] report = getBlockReports(dn, poolId, false, false);
|
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
|
||||||
cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
|
sendBlockReports(dnR, poolId, reports, splitBlockReports);
|
||||||
printStats();
|
printStats();
|
||||||
assertEquals("Wrong number of PendingReplication blocks",
|
assertEquals("Wrong number of PendingReplication blocks",
|
||||||
blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
|
blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
|
||||||
|
@ -500,8 +600,7 @@ public class TestBlockReport {
|
||||||
// Similar to BlockReport_08 but corrupts GS and len of the TEMPORARY's
|
// Similar to BlockReport_08 but corrupts GS and len of the TEMPORARY's
|
||||||
// replica block. Expect the same behaviour: NN should simply ignore this
|
// replica block. Expect the same behaviour: NN should simply ignore this
|
||||||
// block
|
// block
|
||||||
@Test
|
private void blockReport_09(boolean splitBlockReports) throws IOException {
|
||||||
public void blockReport_09() throws IOException {
|
|
||||||
final String METHOD_NAME = GenericTestUtils.getMethodName();
|
final String METHOD_NAME = GenericTestUtils.getMethodName();
|
||||||
Path filePath = new Path("/" + METHOD_NAME + ".dat");
|
Path filePath = new Path("/" + METHOD_NAME + ".dat");
|
||||||
final int DN_N1 = DN_N0 + 1;
|
final int DN_N1 = DN_N0 + 1;
|
||||||
|
@ -526,8 +625,8 @@ public class TestBlockReport {
|
||||||
DataNode dn = cluster.getDataNodes().get(DN_N1);
|
DataNode dn = cluster.getDataNodes().get(DN_N1);
|
||||||
String poolId = cluster.getNamesystem().getBlockPoolId();
|
String poolId = cluster.getNamesystem().getBlockPoolId();
|
||||||
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
|
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
|
||||||
StorageBlockReport[] report = getBlockReports(dn, poolId, true, true);
|
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
|
||||||
cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
|
sendBlockReports(dnR, poolId, reports, splitBlockReports);
|
||||||
printStats();
|
printStats();
|
||||||
assertEquals("Wrong number of PendingReplication blocks",
|
assertEquals("Wrong number of PendingReplication blocks",
|
||||||
2, cluster.getNamesystem().getPendingReplicationBlocks());
|
2, cluster.getNamesystem().getPendingReplicationBlocks());
|
||||||
|
|
|
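Every hunk above routes through sendBlockReports(dnR, poolId, reports, splitBlockReports), a helper whose definition is not visible in this excerpt. A minimal sketch of what such a helper would plausibly look like, assuming TestBlockReport's existing cluster field and the blockReport RPC already used above; the committed helper's body may differ:

    // Hypothetical sketch, not the committed helper: dispatch the reports
    // either as one RPC or as one RPC per storage, depending on the flag.
    private void sendBlockReports(DatanodeRegistration dnR, String poolId,
        StorageBlockReport[] reports, boolean needtoSplit) throws IOException {
      if (!needtoSplit) {
        // Send the full array of per-storage reports in a single RPC.
        cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
      } else {
        // Send one RPC per storage, each carrying a singleton array.
        for (StorageBlockReport report : reports) {
          StorageBlockReport[] singletonReport = { report };
          cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport);
        }
      }
    }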
@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.junit.Test;
+
+import static org.hamcrest.core.IsNot.not;
+import static org.junit.Assert.*;
+
+
+/**
+ * Test to verify that the DataNode Uuid is correctly initialized before
+ * FsDataSet initialization.
+ */
+public class TestDataNodeInitStorage {
+  public static final Log LOG = LogFactory.getLog(TestDataNodeInitStorage.class);
+
+  static private class SimulatedFsDatasetVerifier extends SimulatedFSDataset {
+    static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
+      @Override
+      public SimulatedFsDatasetVerifier newInstance(
+          DataNode datanode, DataStorage storage,
+          Configuration conf) throws IOException {
+        return new SimulatedFsDatasetVerifier(storage, conf);
+      }
+
+      @Override
+      public boolean isSimulated() {
+        return true;
+      }
+    }
+
+    public static void setFactory(Configuration conf) {
+      conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
+               Factory.class.getName());
+    }
+
+    // This constructor does the actual verification by ensuring that
+    // the DatanodeUuid is initialized.
+    public SimulatedFsDatasetVerifier(DataStorage storage, Configuration conf) {
+      super(storage, conf);
+      LOG.info("Assigned DatanodeUuid is " + storage.getDatanodeUuid());
+      assert(storage.getDatanodeUuid() != null);
+      assert(storage.getDatanodeUuid().length() != 0);
+    }
+  }
+
+
+  @Test (timeout = 60000)
+  public void testDataNodeInitStorage() throws Throwable {
+    // Create configuration to use SimulatedFsDatasetVerifier#Factory.
+    Configuration conf = new HdfsConfiguration();
+    SimulatedFsDatasetVerifier.setFactory(conf);
+
+    // Start a cluster so that SimulatedFsDatasetVerifier constructor is
+    // invoked.
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+    cluster.shutdown();
+  }
+}
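The verification idea in the new test, reduced to a self-contained sketch: subclass the component under test and assert the ordering invariant inside the constructor, so merely constructing the object (here, starting a MiniDFSCluster) performs the check. All names below are illustrative stand-ins, not HDFS classes:

    // Illustrative stand-ins only; see the real test above for the HDFS wiring.
    class StorageStub {
      private final String datanodeUuid;
      StorageStub(String datanodeUuid) { this.datanodeUuid = datanodeUuid; }
      String getDatanodeUuid() { return datanodeUuid; }
    }

    class DatasetStub {
      DatasetStub(StorageStub storage) { /* base initialization */ }
    }

    class VerifyingDatasetStub extends DatasetStub {
      VerifyingDatasetStub(StorageStub storage) {
        super(storage);
        // Fails fast if the UUID was not assigned before dataset initialization.
        if (storage.getDatanodeUuid() == null || storage.getDatanodeUuid().isEmpty()) {
          throw new IllegalStateException("DataNode UUID must be assigned first");
        }
      }
    }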
@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * This test verifies that incremental block reports from a single DataNode are
+ * correctly handled by NN. Tests the following variations:
+ *  #1 - Incremental BRs from all storages combined in a single call.
+ *  #2 - Incremental BRs from separate storages sent in separate calls.
+ *
+ *  We also verify that the DataNode is not splitting the reports (it may do so
+ *  in the future).
+ */
+public class TestIncrementalBrVariations {
+  public static final Log LOG = LogFactory.getLog(TestIncrementalBrVariations.class);
+
+  private static short NUM_DATANODES = 1;
+  static final int BLOCK_SIZE = 1024;
+  static final int NUM_BLOCKS = 10;
+  private static final long seed = 0xFACEFEEDL;
+  private static final String NN_METRICS = "NameNodeActivity";
+
+
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private DFSClient client;
+  private static Configuration conf;
+
+  static {
+    ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) BlockManager.blockLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) TestIncrementalBrVariations.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  @Before
+  public void startUpCluster() throws IOException {
+    conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
+    fs = cluster.getFileSystem();
+    client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
+                           cluster.getConfiguration(0));
+  }
+
+  @After
+  public void shutDownCluster() throws IOException {
+    client.close();
+    fs.close();
+    cluster.shutdownDataNodes();
+    cluster.shutdown();
+  }
+
+  /**
+   * Incremental BRs from all storages combined in a single message.
+   */
+  @Test
+  public void testCombinedIncrementalBlockReport() throws IOException {
+    verifyIncrementalBlockReports(false);
+  }
+
+  /**
+   * One incremental BR per storage.
+   */
+  @Test
+  public void testSplitIncrementalBlockReport() throws IOException {
+    verifyIncrementalBlockReports(true);
+  }
+
+  private LocatedBlocks createFileGetBlocks(String filenamePrefix) throws IOException {
+    Path filePath = new Path("/" + filenamePrefix + ".dat");
+
+    // Write out a file with a few blocks, get block locations.
+    DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS,
+                           BLOCK_SIZE, NUM_DATANODES, seed);
+
+    // Get the block list for the file with the block locations.
+    LocatedBlocks blocks = client.getLocatedBlocks(
+        filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
+    assertThat(cluster.getNamesystem().getUnderReplicatedBlocks(), is(0L));
+    return blocks;
+  }
+
+  public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
+    // Get the block list for the file with the block locations.
+    LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
+
+    // All blocks belong to the same file, hence the same BP.
+    DataNode dn = cluster.getDataNodes().get(0);
+    String poolId = cluster.getNamesystem().getBlockPoolId();
+    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
+
+    // We will send 'fake' incremental block reports to the NN that look
+    // like they originated from DN 0.
+    StorageReceivedDeletedBlocks reports[] =
+        new StorageReceivedDeletedBlocks[dn.getFSDataset().getVolumes().size()];
+
+    // Lie to the NN that one block on each storage has been deleted.
+    for (int i = 0; i < reports.length; ++i) {
+      FsVolumeSpi volume = dn.getFSDataset().getVolumes().get(i);
+
+      boolean foundBlockOnStorage = false;
+      ReceivedDeletedBlockInfo rdbi[] = new ReceivedDeletedBlockInfo[1];
+
+      // Find the first block on this storage and mark it as deleted for the
+      // report.
+      for (LocatedBlock block : blocks.getLocatedBlocks()) {
+        if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
+          rdbi[0] = new ReceivedDeletedBlockInfo(block.getBlock().getLocalBlock(),
+              ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
+          foundBlockOnStorage = true;
+          break;
+        }
+      }
+
+      assertTrue(foundBlockOnStorage);
+      reports[i] = new StorageReceivedDeletedBlocks(volume.getStorageID(), rdbi);
+
+      if (splitReports) {
+        // If we are splitting reports then send the report for this storage now.
+        StorageReceivedDeletedBlocks singletonReport[] = { reports[i] };
+        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, singletonReport);
+      }
+    }
+
+    if (!splitReports) {
+      // Send a combined report.
+      cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, reports);
+    }
+
+    // Make sure that the deleted block from each storage was picked up
+    // by the NameNode.
+    assertThat(cluster.getNamesystem().getMissingBlocksCount(), is((long) reports.length));
+  }
+
+  /**
+   * Verify that the DataNode sends a single incremental block report for all
+   * storages.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test (timeout=60000)
+  public void testDataNodeDoesNotSplitReports()
+      throws IOException, InterruptedException {
+    LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
+    assertThat(cluster.getDataNodes().size(), is(1));
+    DataNode dn = cluster.getDataNodes().get(0);
+
+    // Remove all blocks from the DataNode.
+    for (LocatedBlock block : blocks.getLocatedBlocks()) {
+      dn.notifyNamenodeDeletedBlock(
+          block.getBlock(), block.getStorageIDs()[0]);
+    }
+
+    LOG.info("Triggering report after deleting blocks");
+    long ops = getLongCounter("BlockReceivedAndDeletedOps", getMetrics(NN_METRICS));
+
+    // Trigger a report to the NameNode and give it a few seconds.
+    DataNodeTestUtils.triggerBlockReport(dn);
+    Thread.sleep(5000);

+    // Ensure that NameNodeRpcServer.blockReceivedAndDeleted is invoked
+    // exactly once after we triggered the report.
+    assertCounter("BlockReceivedAndDeletedOps", ops+1, getMetrics(NN_METRICS));
+  }
+}
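The two variations exercised by verifyIncrementalBlockReports, condensed into one sketch. It uses only the blockReceivedAndDeleted RPC already shown above; rpc abbreviates cluster.getNameNodeRpc(), and dnR, poolId, and reports stand for the same values the test builds:

    // Sketch of the two dispatch shapes the test exercises.
    if (splitReports) {
      // Variation #2: one RPC per storage, each with a singleton array.
      for (StorageReceivedDeletedBlocks r : reports) {
        rpc.blockReceivedAndDeleted(dnR, poolId,
            new StorageReceivedDeletedBlocks[] { r });
      }
    } else {
      // Variation #1: a single RPC carrying the reports for every storage.
      rpc.blockReceivedAndDeleted(dnR, poolId, reports);
    }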
@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.log4j.Level;
@ -781,7 +781,7 @@ public class TestINodeFile {
       }
       System.out.println("Adding component " + DFSUtil.bytes2String(component));
       dir = new INodeDirectory(++id, component, permstatus, 0);
-      prev.addChild(dir, false, null, null);
+      prev.addChild(dir, false, null);
       prev = dir;
     }
     return dir; // Last Inode in the chain
@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.junit.AfterClass;
 import org.junit.Assert;
@ -206,8 +205,7 @@ public class TestSnapshotPathINodes {
     // Check the INode for file1 (snapshot file)
     INode snapshotFileNode = inodes[inodes.length - 1];
     assertINodeFile(snapshotFileNode, file1);
-    assertTrue(snapshotFileNode.getParent() instanceof
-        INodeDirectoryWithSnapshot);
+    assertTrue(snapshotFileNode.getParent().isWithSnapshot());

     // Call getExistingPathINodes and request only one INode.
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
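The hunks that follow repeat one mechanical migration: the INodeDirectoryWithSnapshot subclass is gone, so instanceof checks become isWithSnapshot() calls and downcasts become asDirectory() plus feature accessors. A self-contained sketch of the subclass-to-feature pattern, using simplified stand-in names rather than the actual HDFS classes:

    // Simplified stand-ins for the pattern; not the actual HDFS types.
    class SnapshotFeatureStub {
      // Would hold the directory's snapshot diff list.
    }

    class DirectoryStub {
      private SnapshotFeatureStub snapshotFeature;  // null when absent

      boolean isWithSnapshot() {
        return snapshotFeature != null;  // replaces the old instanceof check
      }

      SnapshotFeatureStub getDirectoryWithSnapshotFeature() {
        return snapshotFeature;  // replaces the old downcast
      }

      void addSnapshotFeature() {
        snapshotFeature = new SnapshotFeatureStub();
      }
    }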
@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
@ -358,7 +358,7 @@ public class TestNestedSnapshots {

     FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
     INode subNode = fsdir.getINode(sub.toString());
-    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
+    assertTrue(subNode.asDirectory().isWithSnapshot());

     hdfs.allowSnapshot(sub);
     subNode = fsdir.getINode(sub.toString());
@ -366,6 +366,6 @@ public class TestNestedSnapshots {

     hdfs.disallowSnapshot(sub);
     subNode = fsdir.getINode(sub.toString());
-    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
+    assertTrue(subNode.asDirectory().isWithSnapshot());
   }
 }
@ -59,12 +59,11 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.test.GenericTestUtils;
@ -757,10 +756,10 @@ public class TestRenameWithSnapshots {
     // only 2 references: one in deleted list of sdir1, one in created list of
     // sdir1
     assertEquals(2, fooWithCount.getReferenceCount());
-    INodeDirectoryWithSnapshot foo = (INodeDirectoryWithSnapshot) fooWithCount
-        .asDirectory();
+    INodeDirectory foo = fooWithCount.asDirectory();
     assertEquals(1, foo.getDiffs().asList().size());
-    assertEquals("s1", foo.getLastSnapshot().getRoot().getLocalName());
+    assertEquals("s1", foo.getDirectoryWithSnapshotFeature().getLastSnapshot()
+        .getRoot().getLocalName());
     INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
     assertEquals(1, bar1.getDiffs().asList().size());
     assertEquals("s1", bar1.getDiffs().getLastSnapshot().getRoot()
@ -973,8 +972,7 @@ public class TestRenameWithSnapshots {
     INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
     // 5 references: s1, s22, s333, s2222, current tree of sdir1
     assertEquals(5, fooWithCount.getReferenceCount());
-    INodeDirectoryWithSnapshot foo = (INodeDirectoryWithSnapshot) fooWithCount
-        .asDirectory();
+    INodeDirectory foo = fooWithCount.asDirectory();
     List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
     assertEquals(4, fooDiffs.size());
     assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
@ -1032,7 +1030,7 @@ public class TestRenameWithSnapshots {
     fooRef = fsdir.getINode(foo_s2222.toString()).asReference();
     fooWithCount = (WithCount) fooRef.getReferredINode();
     assertEquals(4, fooWithCount.getReferenceCount());
-    foo = (INodeDirectoryWithSnapshot) fooWithCount.asDirectory();
+    foo = fooWithCount.asDirectory();
     fooDiffs = foo.getDiffs().asList();
     assertEquals(4, fooDiffs.size());
     assertEquals("s2222", fooDiffs.get(3).snapshot.getRoot().getLocalName());
@ -1171,8 +1169,7 @@ public class TestRenameWithSnapshots {
     assertTrue(fooRef instanceof INodeReference.WithName);
     INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
     assertEquals(1, fooWC.getReferenceCount());
-    INodeDirectoryWithSnapshot fooDir = (INodeDirectoryWithSnapshot) fooWC
-        .getReferredINode().asDirectory();
+    INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
     List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
     assertEquals(1, diffs.size());
     assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
@ -1263,7 +1260,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
     INodeDirectory mockDir2 = spy(dir2);
     doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
-        (Snapshot) anyObject(), (INodeMap) anyObject());
+        (Snapshot) anyObject());
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

@ -1288,9 +1285,8 @@ public class TestRenameWithSnapshots {
     assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

     INode fooNode = fsdir.getINode4Write(foo.toString());
-    assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
-    List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
-        .getDiffs().asList();
+    assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
+    List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
     assertEquals(1, fooDiffs.size());
     assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());

@ -1302,7 +1298,7 @@ public class TestRenameWithSnapshots {
     assertFalse(hdfs.exists(newfoo));
     INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
         .asDirectory();
-    assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
+    assertFalse(dir2Node.isWithSnapshot());
     ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
     assertEquals(1, dir2Children.size());
     assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
@ -1331,7 +1327,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
     INodeDirectory mockDir2 = spy(dir2);
     doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
-        (Snapshot) anyObject(), (INodeMap) anyObject());
+        (Snapshot) anyObject());
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

@ -1366,7 +1362,7 @@ public class TestRenameWithSnapshots {
     assertFalse(hdfs.exists(newfoo));
     INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
         .asDirectory();
-    assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
+    assertFalse(dir2Node.isWithSnapshot());
     ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
     assertEquals(1, dir2Children.size());
     assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
@ -1393,7 +1389,7 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
     INodeDirectory mockDir3 = spy(dir3);
     doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
-        (Snapshot) anyObject(), (INodeMap) anyObject());
+        (Snapshot) anyObject());
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());

@ -1420,8 +1416,7 @@ public class TestRenameWithSnapshots {
     INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
     assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
     assertTrue(fooNode instanceof INodeReference.DstReference);
-    List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode
-        .asDirectory()).getDiffs().asList();
+    List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
     assertEquals(1, fooDiffs.size());
     assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());

@ -1455,8 +1450,7 @@ public class TestRenameWithSnapshots {
     assertTrue(hdfs.exists(foo_s3));

     assertTrue(fooNode instanceof INodeReference.DstReference);
-    fooDiffs = ((INodeDirectoryWithSnapshot) fooNode.asDirectory()).getDiffs()
-        .asList();
+    fooDiffs = fooNode.asDirectory().getDiffs().asList();
     assertEquals(2, fooDiffs.size());
     assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
     assertEquals("s3", fooDiffs.get(1).snapshot.getRoot().getLocalName());
@ -1495,10 +1489,9 @@ public class TestRenameWithSnapshots {
     INodeDirectory mockDir3 = spy(dir3);
     // fail the rename but succeed in undo
     doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
-        anyBoolean(), (Snapshot) anyObject(), (INodeMap) anyObject());
-    Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(),
-        anyBoolean(), (Snapshot) anyObject(),
-        (INodeMap) anyObject())).thenReturn(false).thenCallRealMethod();
+        anyBoolean(), (Snapshot) anyObject());
+    Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
+        (Snapshot) anyObject())).thenReturn(false).thenCallRealMethod();
     INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
     root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
     foo3Node.setParent(mockDir3);
@ -1561,7 +1554,7 @@ public class TestRenameWithSnapshots {
         .getChildrenList(null));
     assertEquals(1, childrenList.size());
     INode fooNode = childrenList.get(0);
-    assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
+    assertTrue(fooNode.asDirectory().isWithSnapshot());
     INode barNode = fsdir.getINode4Write(bar.toString());
     assertTrue(barNode.getClass() == INodeFile.class);
     assertSame(fooNode, barNode.getParent());
@ -1637,7 +1630,7 @@ public class TestRenameWithSnapshots {
         .getChildrenList(null));
     assertEquals(1, childrenList.size());
     INode fooNode = childrenList.get(0);
-    assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
+    assertTrue(fooNode.asDirectory().isWithSnapshot());
     assertSame(dir1Node, fooNode.getParent());
     List<DirectoryDiff> diffList = ((INodeDirectorySnapshottable) dir1Node)
         .getDiffs().asList();
@ -1656,7 +1649,7 @@ public class TestRenameWithSnapshots {
         .getChildrenList(null));
     assertEquals(1, childrenList.size());
     INode subdir2Node = childrenList.get(0);
-    assertTrue(subdir2Node.getClass() == INodeDirectoryWithSnapshot.class);
+    assertTrue(subdir2Node.asDirectory().isWithSnapshot());
     assertSame(dir2Node, subdir2Node.getParent());
     assertSame(subdir2Node, fsdir.getINode4Write(sub_dir2.toString()));
     INode subsubdir2Node = fsdir.getINode4Write(subsub_dir2.toString());
@ -1669,7 +1662,7 @@ public class TestRenameWithSnapshots {
     assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
     assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

-    diffList = ((INodeDirectoryWithSnapshot) subdir2Node).getDiffs().asList();
+    diffList = subdir2Node.asDirectory().getDiffs().asList();
     assertEquals(0, diffList.size());
   }

@ -1697,8 +1690,7 @@ public class TestRenameWithSnapshots {
     }

     // check
-    INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
-        .getINode4Write(foo.toString());
+    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
     ReadOnlyList<INode> children = fooNode.getChildrenList(null);
     assertEquals(1, children.size());
     List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
@ -1948,8 +1940,7 @@ public class TestRenameWithSnapshots {
     INodeReference.WithCount wc =
         (WithCount) fooRef.asReference().getReferredINode();
     assertEquals(1, wc.getReferenceCount());
-    INodeDirectoryWithSnapshot fooNode =
-        (INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
+    INodeDirectory fooNode = wc.getReferredINode().asDirectory();
     ReadOnlyList<INode> children = fooNode.getChildrenList(null);
     assertEquals(1, children.size());
     assertEquals(bar.getName(), children.get(0).getLocalName());
@ -2017,8 +2008,7 @@ public class TestRenameWithSnapshots {
     INodeReference.WithCount wc =
         (WithCount) fooRef.asReference().getReferredINode();
     assertEquals(2, wc.getReferenceCount());
-    INodeDirectoryWithSnapshot fooNode =
-        (INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
+    INodeDirectory fooNode = wc.getReferredINode().asDirectory();
     ReadOnlyList<INode> children = fooNode.getChildrenList(null);
     assertEquals(3, children.size());
     assertEquals(bar.getName(), children.get(0).getLocalName());
@ -2044,9 +2034,9 @@ public class TestRenameWithSnapshots {

   /**
    * This test demonstrates that
-   * {@link INodeDirectoryWithSnapshot#removeChild(INode, Snapshot, INodeMap)}
+   * {@link INodeDirectory#removeChild(INode, Snapshot)}
    * and
-   * {@link INodeDirectoryWithSnapshot#addChild(INode, boolean, Snapshot, INodeMap)}
+   * {@link INodeDirectory#addChild(INode, boolean, Snapshot)}
    * should use {@link INode#isInLatestSnapshot(Snapshot)} to check if the
    * added/removed child should be recorded in snapshots.
    */
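The javadoc above describes a guard worth spelling out: record a child add or remove in the snapshot diff only when the directory is captured by the latest snapshot; otherwise mutate the children list directly. A hedged, self-contained sketch of that rule with stand-in types:

    // Stand-in types; the real logic lives in INodeDirectory and its feature.
    class GuardedDirectoryStub {
      final java.util.List<Object> children = new java.util.ArrayList<>();
      final java.util.List<Object> deletedDiff = new java.util.ArrayList<>();
      boolean inLatestSnapshot;  // what isInLatestSnapshot(latest) would report

      void removeChild(Object child) {
        if (inLatestSnapshot) {
          // The current state is captured by a snapshot: record the removal
          // in the diff so the snapshot copy of the child survives.
          deletedDiff.add(child);
        }
        // Otherwise no snapshot captures this directory; a plain removal is
        // enough and the child may be destroyed outright.
        children.remove(child);
      }
    }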
@ -2063,7 +2053,7 @@ public class TestRenameWithSnapshots {
     hdfs.mkdirs(foo);
     SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
     final Path bar = new Path(foo, "bar");
-    // create file bar, and foo will become an INodeDirectoryWithSnapshot
+    // create file bar, and foo will become an INodeDirectory with snapshot
     DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
     // delete snapshot s1. now foo is not in any snapshot
     hdfs.deleteSnapshot(dir1, "s1");
@ -2079,7 +2069,7 @@ public class TestRenameWithSnapshots {

     // delete /dir2/foo. Since it is not in any snapshot, we will call its
     // destroy function. If we do not use isInLatestSnapshot in removeChild and
-    // addChild methods in INodeDirectoryWithSnapshot, the file bar will be
+    // addChild methods in INodeDirectory (with snapshot), the file bar will be
     // stored in the deleted list of foo, and will be destroyed.
     hdfs.delete(foo2, true);

@ -2130,8 +2120,8 @@ public class TestRenameWithSnapshots {
     // check the internal
     assertFalse("after deleting s0, " + foo_s0 + " should not exist",
         hdfs.exists(foo_s0));
-    INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
-        .getINode4Write(dir2.toString());
+    INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
+        .asDirectory();
     assertTrue("the diff list of " + dir2
         + " should be empty after deleting s0", dir2Node.getDiffs().asList()
         .isEmpty());
@ -2140,16 +2130,14 @@ public class TestRenameWithSnapshots {
     INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
     assertTrue(fooRefNode instanceof INodeReference.DstReference);
     INodeDirectory fooNode = fooRefNode.asDirectory();
-    // fooNode should be still INodeDirectoryWithSnapshot since we call
+    // fooNode should be still INodeDirectory (With Snapshot) since we call
     // recordModification before the rename
-    assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
-    assertTrue(((INodeDirectoryWithSnapshot) fooNode).getDiffs().asList()
-        .isEmpty());
+    assertTrue(fooNode.isWithSnapshot());
+    assertTrue(fooNode.getDiffs().asList().isEmpty());
     INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory();
-    // bar should also be an INodeDirectoryWithSnapshot, and both of its diff
+    // bar should also be INodeDirectory (With Snapshot), and both of its diff
     // list and children list are empty
-    assertTrue(((INodeDirectoryWithSnapshot) barNode).getDiffs().asList()
-        .isEmpty());
+    assertTrue(barNode.getDiffs().asList().isEmpty());
     assertTrue(barNode.getChildrenList(null).isEmpty());

     restartClusterAndCheckImage(true);
|
@ -2199,8 +2187,8 @@ public class TestRenameWithSnapshots {
|
||||||
assertTrue(hdfs.exists(file_s0));
|
assertTrue(hdfs.exists(file_s0));
|
||||||
|
|
||||||
// check dir1: foo should be in the created list of s0
|
// check dir1: foo should be in the created list of s0
|
||||||
INodeDirectoryWithSnapshot dir1Node = (INodeDirectoryWithSnapshot) fsdir
|
INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
|
||||||
.getINode4Write(dir1.toString());
|
.asDirectory();
|
||||||
List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
|
List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
|
||||||
assertEquals(1, dir1DiffList.size());
|
assertEquals(1, dir1DiffList.size());
|
||||||
List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
|
List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
|
||||||
|
@ -2215,8 +2203,8 @@ public class TestRenameWithSnapshots {
|
||||||
|
|
||||||
// check foo and its subtree
|
// check foo and its subtree
|
||||||
final Path newbar = new Path(newfoo, bar.getName());
|
final Path newbar = new Path(newfoo, bar.getName());
|
||||||
INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
|
INodeDirectory barNode = fsdir.getINode4Write(newbar.toString())
|
||||||
.getINode4Write(newbar.toString());
|
.asDirectory();
|
||||||
assertSame(fooNode.asDirectory(), barNode.getParent());
|
assertSame(fooNode.asDirectory(), barNode.getParent());
|
||||||
// bar should only have a snapshot diff for s0
|
// bar should only have a snapshot diff for s0
|
||||||
List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
|
List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
|
||||||
|
@ -2229,8 +2217,8 @@ public class TestRenameWithSnapshots {
|
||||||
|
|
||||||
// check dir2: a WithName instance for foo should be in the deleted list
|
// check dir2: a WithName instance for foo should be in the deleted list
|
||||||
// of the snapshot diff for s2
|
// of the snapshot diff for s2
|
||||||
INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
|
INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
|
||||||
.getINode4Write(dir2.toString());
|
.asDirectory();
|
||||||
List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
|
List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
|
||||||
// dir2Node should contain 2 snapshot diffs, one for s2, and the other was
|
// dir2Node should contain 2 snapshot diffs, one for s2, and the other was
|
||||||
// originally s1 (created when dir2 was transformed to a snapshottable dir),
|
// originally s1 (created when dir2 was transformed to a snapshottable dir),
|
||||||
|
@ -2287,8 +2275,7 @@ public class TestRenameWithSnapshots {
|
||||||
// make sure the file under bar is deleted
|
// make sure the file under bar is deleted
|
||||||
final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
|
final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
|
||||||
"foo/bar");
|
"foo/bar");
|
||||||
INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
|
INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
|
||||||
.getINode(barInS0.toString());
|
|
||||||
assertEquals(0, barNode.getChildrenList(null).size());
|
assertEquals(0, barNode.getChildrenList(null).size());
|
||||||
List<DirectoryDiff> diffList = barNode.getDiffs().asList();
|
List<DirectoryDiff> diffList = barNode.getDiffs().asList();
|
||||||
assertEquals(1, diffList.size());
|
assertEquals(1, diffList.size());
|
||||||
|
|
|
@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.junit.After;
 import org.junit.Before;
@ -92,12 +92,12 @@ public class TestSetQuotaWithSnapshot {
     INodeDirectory subNode = INodeDirectory.valueOf(
         fsdir.getINode(sub.toString()), sub);
     // subNode should be a INodeDirectory, but not an INodeDirectoryWithSnapshot
-    assertFalse(subNode instanceof INodeDirectoryWithSnapshot);
+    assertFalse(subNode.isWithSnapshot());

     hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
     subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
     assertTrue(subNode.isQuotaSet());
-    assertFalse(subNode instanceof INodeDirectoryWithSnapshot);
+    assertFalse(subNode.isWithSnapshot());
   }

   /**
@ -150,8 +150,8 @@ public class TestSetQuotaWithSnapshot {
     DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
     hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
     INode subNode = fsdir.getINode4Write(subDir.toString());
-    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
-    List<DirectoryDiff> diffList = ((INodeDirectoryWithSnapshot) subNode).getDiffs().asList();
+    assertTrue(subNode.asDirectory().isWithSnapshot());
+    List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
     assertEquals(1, diffList.size());
     assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot));
     List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -311,9 +311,9 @@ public class TestSnapshotDeletion {
     // make sure the whole subtree of sub is stored correctly in snapshot
     Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
         sub.getName());
-    INodeDirectoryWithSnapshot snapshotNode4Sub =
-        (INodeDirectoryWithSnapshot) fsdir.getINode(snapshotSub.toString());
-    assertEquals(INodeDirectoryWithSnapshot.class, snapshotNode4Sub.getClass());
+    INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString())
+        .asDirectory();
+    assertTrue(snapshotNode4Sub.isWithSnapshot());
     // the snapshot copy of sub has only one child subsub.
     // newFile should have been destroyed
     assertEquals(1, snapshotNode4Sub.getChildrenList(null).size());
@@ -323,8 +323,7 @@ public class TestSnapshotDeletion {
     // check the snapshot copy of subsub, which is contained in the subtree of
     // sub's snapshot copy
     INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0);
-    assertEquals(INodeDirectoryWithSnapshot.class,
-        snapshotNode4Subsub.getClass());
+    assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
     assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
     // check the children of subsub
     INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
@@ -478,8 +477,8 @@ public class TestSnapshotDeletion {
     DirectoryDiffList diffList = dirNode.getDiffs();
     assertEquals(1, diffList.asList().size());
     assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName());
-    diffList = ((INodeDirectoryWithSnapshot) fsdir.getINode(
-        metaChangeDir.toString())).getDiffs();
+    diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory()
+        .getDiffs();
     assertEquals(0, diffList.asList().size());
 
     // check 2. noChangeDir and noChangeFile are still there

@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;

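All of the snapshot-test hunks above follow from HDFS-5632: the INodeDirectoryWithSnapshot subclass is gone, so instanceof checks become isWithSnapshot() queries against a DirectoryWithSnapshotFeature attached to a plain INodeDirectory. A toy sketch of that composition-over-subclassing move, using hypothetical stand-in types rather than the real HDFS classes:

import java.util.ArrayList;
import java.util.List;

public class FeatureFlagSketch {
  interface Feature {}
  static class DirectoryWithSnapshotFeature implements Feature {}

  static class INodeDirectory {
    private final List<Feature> features = new ArrayList<>();

    void addSnapshotFeature() {
      features.add(new DirectoryWithSnapshotFeature());
    }

    // Replaces "instanceof INodeDirectoryWithSnapshot": the capability is
    // a feature carried by the node, not a subclass of it.
    boolean isWithSnapshot() {
      return features.stream()
          .anyMatch(f -> f instanceof DirectoryWithSnapshotFeature);
    }
  }

  public static void main(String[] args) {
    INodeDirectory dir = new INodeDirectory();
    System.out.println(dir.isWithSnapshot()); // false
    dir.addSnapshotFeature();
    System.out.println(dir.isWithSnapshot()); // true
  }
}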
@@ -237,6 +237,9 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5656. bzip2 codec can drop records when reading data in splits
     (jlowe)
 
+    MAPREDUCE-5623. TestJobCleanup fails because of RejectedExecutionException
+    and NPE. (jlowe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

@@ -195,8 +195,7 @@ public class TestJobCleanup {
     RunningJob job = jobClient.submitJob(jc);
     JobID id = job.getID();
     job.waitForCompletion();
-    Counters counters = job.getCounters();
-    assertTrue("No. of failed maps should be 1",counters.getCounter(JobCounter.NUM_FAILED_MAPS) == 1);
+    assertEquals("Job did not fail", JobStatus.FAILED, job.getJobState());
 
     if (fileName != null) {
       Path testFile = new Path(outDir, fileName);
@@ -242,9 +241,7 @@ public class TestJobCleanup {
     job.killJob(); // kill the job
 
     job.waitForCompletion(); // wait for the job to complete
-    counters = job.getCounters();
-    assertTrue("No. of killed maps should be 1", counters.getCounter(JobCounter.NUM_KILLED_MAPS) == 1);
+    assertEquals("Job was not killed", JobStatus.KILLED, job.getJobState());
 
     if (fileName != null) {
       Path testFile = new Path(outDir, fileName);

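The two TestJobCleanup hunks above (MAPREDUCE-5623) replace counter assertions, which could fail with a RejectedExecutionException or NPE once cleanup had already torn the job down, with assertions on the job's terminal state, which is stable after waitForCompletion(). A dependency-free sketch of the same idea, with a hypothetical job stub standing in for RunningJob:

public class TerminalStateCheck {
  enum JobStatus { RUNNING, SUCCEEDED, FAILED, KILLED }

  static class JobStub {
    private final JobStatus state;
    JobStub(JobStatus state) { this.state = state; }
    JobStatus getJobState() { return state; }
  }

  // Assert on the terminal state, which waitForCompletion() guarantees,
  // instead of counters that may already have been cleaned up.
  static void assertFailed(JobStub job) {
    if (job.getJobState() != JobStatus.FAILED) {
      throw new AssertionError("Job did not fail: " + job.getJobState());
    }
  }

  public static void main(String[] args) {
    assertFailed(new JobStub(JobStatus.FAILED)); // passes
  }
}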
@@ -168,6 +168,13 @@ Release 2.4.0 - UNRELEASED
     YARN-1311. Fixed app specific scheduler-events' names to be app-attempt
     based. (vinodkv via jianhe)
 
+    YARN-1485. Modified RM HA configuration validation to also ensure that
+    service-address configuration are configured for every RM. (Xuan Gong via
+    vinodkv)
+
+    YARN-1435. Modified Distributed Shell to accept either the command or the
+    custom script. (Xuan Gong via zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -239,6 +246,12 @@ Release 2.4.0 - UNRELEASED
     YARN-1405. Fixed ResourceManager to not hang when init/start fails with an
     exception w.r.t state-store. (Jian He via vinodkv)
 
+    YARN-1505. Fixed Webapplication proxy server to not hardcode its bind
+    address. (Xuan Gong via vinodkv)
+
+    YARN-1145. Fixed a potential file-handle leak in the web interface for
+    displaying aggregated logs. (Rohith Sharma via vinodkv)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

@@ -58,13 +58,17 @@ public class HAUtil {
    */
   public static void verifyAndSetConfiguration(Configuration conf)
       throws YarnRuntimeException {
-    verifyAndSetRMHAIds(conf);
-    verifyAndSetRMHAId(conf);
+    verifyAndSetRMHAIdsList(conf);
+    verifyAndSetCurrentRMHAId(conf);
     verifyAndSetAllServiceAddresses(conf);
   }
 
-  private static void verifyAndSetRMHAIds(Configuration conf) {
+  /**
+   * Verify configuration that there are at least two RM-ids
+   * and RPC addresses are specified for each RM-id.
+   * Then set the RM-ids.
+   */
+  private static void verifyAndSetRMHAIdsList(Configuration conf) {
     Collection<String> ids =
       conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
     if (ids.size() < 2) {
@@ -76,6 +80,24 @@ public class HAUtil {
 
     StringBuilder setValue = new StringBuilder();
     for (String id: ids) {
+      // verify the RM service addresses configurations for every RMIds
+      for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+        String confKey = null;
+        try {
+          confKey = addSuffix(prefix, id);
+          if (conf.getTrimmed(confKey) == null) {
+            throwBadConfigurationException(getNeedToSetValueMessage(confKey));
+          }
+        } catch (IllegalArgumentException iae) {
+          String errmsg = iae.getMessage();
+          if (confKey == null) {
+            // Error at addSuffix
+            errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
+                getRMHAId(conf));
+          }
+          throwBadConfigurationException(errmsg);
+        }
+      }
       setValue.append(id);
       setValue.append(",");
     }
@@ -83,7 +105,7 @@ public class HAUtil {
       setValue.substring(0, setValue.length() - 1));
   }
 
-  private static void verifyAndSetRMHAId(Configuration conf) {
+  private static void verifyAndSetCurrentRMHAId(Configuration conf) {
     String rmId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
     if (rmId == null) {
       throwBadConfigurationException(

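The HAUtil hunks above (YARN-1485) extend validation so that every RM-id listed under yarn.resourcemanager.ha.rm-ids must have a value for each required service-address key. A minimal standalone sketch of that cross-product check, assuming a plain Map in place of Hadoop's Configuration and hypothetical key names:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HaConfigCheck {
  // Hypothetical stand-ins for YarnConfiguration.RM_HA_IDS and
  // RM_SERVICES_ADDRESS_CONF_KEYS.
  static final String RM_HA_IDS = "yarn.resourcemanager.ha.rm-ids";
  static final List<String> ADDRESS_KEYS = Arrays.asList(
      "yarn.resourcemanager.address",
      "yarn.resourcemanager.scheduler.address");

  // Fails fast if any <address-key>.<rm-id> entry is missing, mirroring
  // the verifyAndSetRMHAIdsList loop in the diff.
  static void verify(Map<String, String> conf) {
    String ids = conf.get(RM_HA_IDS);
    if (ids == null || ids.split(",").length < 2) {
      throw new IllegalArgumentException("need at least two RM ids");
    }
    for (String id : ids.split(",")) {
      for (String key : ADDRESS_KEYS) {
        String confKey = key + "." + id.trim();
        if (conf.get(confKey) == null) {
          throw new IllegalArgumentException("missing " + confKey);
        }
      }
    }
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put(RM_HA_IDS, "rm1,rm2");
    conf.put("yarn.resourcemanager.address.rm1", "host1:8032");
    conf.put("yarn.resourcemanager.scheduler.address.rm1", "host1:8030");
    conf.put("yarn.resourcemanager.address.rm2", "host2:8032");
    conf.put("yarn.resourcemanager.scheduler.address.rm2", "host2:8030");
    verify(conf); // throws if any per-RM address were missing
  }
}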
@@ -218,13 +218,14 @@ public class ApplicationMaster {
   private long shellScriptPathLen = 0;
 
   // Hardcoded path to shell script in launch container's local env
-  private final String ExecShellStringPath = "ExecShellScript.sh";
+  private static final String ExecShellStringPath = "ExecShellScript.sh";
+  private static final String ExecBatScripStringtPath = "ExecBatScript.bat";
 
   // Hardcoded path to custom log_properties
-  private final String log4jPath = "log4j.properties";
+  private static final String log4jPath = "log4j.properties";
 
-  private final String shellCommandPath = "shellCommands";
-  private final String shellArgsPath = "shellArgs";
+  private static final String shellCommandPath = "shellCommands";
+  private static final String shellArgsPath = "shellArgs";
 
   private volatile boolean done;
   private volatile boolean success;
@@ -234,6 +235,9 @@ public class ApplicationMaster {
   // Launch threads
   private List<Thread> launchThreads = new ArrayList<Thread>();
 
+  private final String linux_bash_command = "bash";
+  private final String windows_command = "cmd /c";
+
   /**
    * @param args Command line args
    */
@@ -308,8 +312,6 @@ public class ApplicationMaster {
     Options opts = new Options();
     opts.addOption("app_attempt_id", true,
         "App Attempt ID. Not to be used unless for testing purposes");
-    opts.addOption("shell_script", true,
-        "Location of the shell script to be executed");
     opts.addOption("shell_env", true,
         "Environment for shell script. Specified as env_key=env_val pairs");
     opts.addOption("container_memory", true,
@@ -387,11 +389,15 @@ public class ApplicationMaster {
         + appAttemptID.getApplicationId().getClusterTimestamp()
         + ", attemptId=" + appAttemptID.getAttemptId());
 
-    if (!fileExist(shellCommandPath)) {
+    if (!fileExist(shellCommandPath)
+        && envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION).isEmpty()) {
       throw new IllegalArgumentException(
-          "No shell command specified to be executed by application master");
+          "No shell command or shell script specified to be executed by application master");
+    }
+
+    if (fileExist(shellCommandPath)) {
+      shellCommand = readContent(shellCommandPath);
     }
-    shellCommand = readContent(shellCommandPath);
 
     if (fileExist(shellArgsPath)) {
       shellArgs = readContent(shellArgsPath);
@@ -847,7 +853,9 @@ public class ApplicationMaster {
       }
       shellRsrc.setTimestamp(shellScriptPathTimestamp);
       shellRsrc.setSize(shellScriptPathLen);
-      localResources.put(ExecShellStringPath, shellRsrc);
+      localResources.put(Shell.WINDOWS ? ExecBatScripStringtPath :
+          ExecShellStringPath, shellRsrc);
+      shellCommand = Shell.WINDOWS ? windows_command : linux_bash_command;
     }
     ctx.setLocalResources(localResources);
 
@@ -858,7 +866,8 @@ public class ApplicationMaster {
     vargs.add(shellCommand);
     // Set shell script path
     if (!shellScriptPath.isEmpty()) {
-      vargs.add(ExecShellStringPath);
+      vargs.add(Shell.WINDOWS ? ExecBatScripStringtPath
+          : ExecShellStringPath);
    }
 
     // Set args for the shell command if any

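The ApplicationMaster hunks above branch on Shell.WINDOWS to localize a .bat wrapper and launch it with cmd /c instead of bash. A minimal sketch of the same platform switch, with a simple os.name probe standing in for Hadoop's Shell.WINDOWS flag:

public class LaunchCommand {
  // Stand-in for org.apache.hadoop.util.Shell.WINDOWS.
  static final boolean WINDOWS =
      System.getProperty("os.name").toLowerCase().startsWith("windows");

  static final String EXEC_SHELL = "ExecShellScript.sh";
  static final String EXEC_BAT = "ExecBatScript.bat";

  // Mirrors the diff: pick the interpreter and the localized script name
  // per platform, then build the container launch command.
  static String build(boolean haveScript) {
    String interpreter = WINDOWS ? "cmd /c" : "bash";
    StringBuilder cmd = new StringBuilder(interpreter);
    if (haveScript) {
      cmd.append(' ').append(WINDOWS ? EXEC_BAT : EXEC_SHELL);
    }
    return cmd.toString();
  }

  public static void main(String[] args) {
    System.out.println(build(true));
  }
}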
@@ -49,6 +49,7 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -167,11 +168,14 @@ public class Client {
   // Command line options
   private Options opts;
 
-  private final String shellCommandPath = "shellCommands";
-  private final String shellArgsPath = "shellArgs";
-  private final String appMasterJarPath = "AppMaster.jar";
+  private static final String shellCommandPath = "shellCommands";
+  private static final String shellArgsPath = "shellArgs";
+  private static final String appMasterJarPath = "AppMaster.jar";
   // Hardcoded path to custom log_properties
-  private final String log4jPath = "log4j.properties";
+  private static final String log4jPath = "log4j.properties";
+
+  private static final String linuxShellPath = "ExecShellScript.sh";
+  private static final String windowBatPath = "ExecBatScript.bat";
 
   /**
    * @param args Command line arguments
@@ -225,8 +229,11 @@ public class Client {
     opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
     opts.addOption("master_vcores", true, "Amount of virtual cores to be requested to run the application master");
     opts.addOption("jar", true, "Jar file containing the application master");
-    opts.addOption("shell_command", true, "Shell command to be executed by the Application Master");
-    opts.addOption("shell_script", true, "Location of the shell script to be executed");
+    opts.addOption("shell_command", true, "Shell command to be executed by " +
+        "the Application Master. Can only specify either --shell_command " +
+        "or --shell_script");
+    opts.addOption("shell_script", true, "Location of the shell script to be " +
+        "executed. Can only specify either --shell_command or --shell_script");
     opts.addOption("shell_args", true, "Command line args for the shell script." +
         "Multiple args can be separated by empty space.");
     opts.getOption("shell_args").setArgs(Option.UNLIMITED_VALUES);
@@ -308,12 +315,15 @@ public class Client {
 
     appMasterJar = cliParser.getOptionValue("jar");
 
-    if (!cliParser.hasOption("shell_command")) {
-      throw new IllegalArgumentException("No shell command specified to be executed by application master");
-    }
-    shellCommand = cliParser.getOptionValue("shell_command");
-    if (cliParser.hasOption("shell_script")) {
+    if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) {
+      throw new IllegalArgumentException(
+          "No shell command or shell script specified to be executed by application master");
+    } else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) {
+      throw new IllegalArgumentException("Can not specify shell_command option " +
+          "and shell_script option at the same time");
+    } else if (cliParser.hasOption("shell_command")) {
+      shellCommand = cliParser.getOptionValue("shell_command");
+    } else {
       shellScriptPath = cliParser.getOptionValue("shell_script");
     }
     if (cliParser.hasOption("shell_args")) {
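The Client hunk above turns --shell_command and --shell_script into a mutually exclusive pair of which exactly one must be given. The three-way validation in isolation, as a hypothetical helper rather than the DistributedShell code:

public class ShellSourceCheck {
  // Mirrors the diff: at least one of command/script, never both.
  static String resolve(String command, String script) {
    boolean hasCommand = command != null;
    boolean hasScript = script != null;
    if (!hasCommand && !hasScript) {
      throw new IllegalArgumentException(
          "No shell command or shell script specified");
    } else if (hasCommand && hasScript) {
      throw new IllegalArgumentException(
          "Can not specify shell_command option and shell_script option "
              + "at the same time");
    }
    return hasCommand ? command : script; // exactly one branch remains
  }

  public static void main(String[] args) {
    System.out.println(resolve("ls", null)); // prints "ls"
  }
}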
@@ -466,8 +476,11 @@ public class Client {
     long hdfsShellScriptTimestamp = 0;
     if (!shellScriptPath.isEmpty()) {
       Path shellSrc = new Path(shellScriptPath);
-      String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh";
-      Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
+      String shellPathSuffix =
+          appName + "/" + appId.getId() + "/"
+              + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
+      Path shellDst =
+          new Path(fs.getHomeDirectory(), shellPathSuffix);
       fs.copyFromLocalFile(false, true, shellSrc, shellDst);
       hdfsShellScriptLocation = shellDst.toUri().toString();
       FileStatus shellFileStatus = fs.getFileStatus(shellDst);

@@ -303,6 +303,54 @@ public class TestDistributedShell {
     verifyContainerLog(4, expectedContent, false, "");
   }
 
+  @Test(timeout=90000)
+  public void testDSShellWithShellScript() throws Exception {
+    final File basedir =
+        new File("target", TestDistributedShell.class.getName());
+    final File tmpDir = new File(basedir, "tmpDir");
+    tmpDir.mkdirs();
+    final File customShellScript = new File(tmpDir, "custom_script.sh");
+    if (customShellScript.exists()) {
+      customShellScript.delete();
+    }
+    if (!customShellScript.createNewFile()) {
+      Assert.fail("Can not create custom shell script file.");
+    }
+    PrintWriter fileWriter = new PrintWriter(customShellScript);
+    // set the output to DEBUG level
+    fileWriter.write("echo testDSShellWithShellScript");
+    fileWriter.close();
+    System.out.println(customShellScript.getAbsolutePath());
+    String[] args = {
+        "--jar",
+        APPMASTER_JAR,
+        "--num_containers",
+        "1",
+        "--shell_script",
+        customShellScript.getAbsolutePath(),
+        "--master_memory",
+        "512",
+        "--master_vcores",
+        "2",
+        "--container_memory",
+        "128",
+        "--container_vcores",
+        "1"
+    };
+
+    LOG.info("Initializing DS Client");
+    final Client client =
+        new Client(new Configuration(yarnCluster.getConfig()));
+    boolean initSuccess = client.init(args);
+    Assert.assertTrue(initSuccess);
+    LOG.info("Running DS Client");
+    boolean result = client.run();
+    LOG.info("Client run completed. Result=" + result);
+    List<String> expectedContent = new ArrayList<String>();
+    expectedContent.add("testDSShellWithShellScript");
+    verifyContainerLog(1, expectedContent, false, "");
+  }
+
   @Test(timeout=90000)
   public void testDSShellWithInvalidArgs() throws Exception {
     Client client = new Client(new Configuration(yarnCluster.getConfig()));
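The new testDSShellWithShellScript above materializes a one-line script under target/ and hands its absolute path to the client. The fixture step on its own, as a small sketch (hypothetical class name):

import java.io.File;
import java.io.PrintWriter;

public class ScriptFixture {
  // Creates dir/name containing body, the same setup the test performs
  // before invoking the DS client with --shell_script.
  public static File writeScript(File dir, String name, String body)
      throws Exception {
    dir.mkdirs();
    File script = new File(dir, name);
    try (PrintWriter w = new PrintWriter(script)) {
      w.write(body);
    }
    return script;
  }

  public static void main(String[] args) throws Exception {
    File f = writeScript(new File("target", "tmpDir"),
        "custom_script.sh", "echo testDSShellWithShellScript");
    System.out.println(f.getAbsolutePath());
  }
}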
@@ -399,6 +447,58 @@ public class TestDistributedShell {
       Assert.assertTrue("The throw exception is not expected",
           e.getMessage().contains("Invalid virtual cores specified"));
     }
+
+    LOG.info("Initializing DS Client with --shell_command and --shell_script");
+    try {
+      String[] args = {
+          "--jar",
+          APPMASTER_JAR,
+          "--num_containers",
+          "2",
+          "--shell_command",
+          Shell.WINDOWS ? "dir" : "ls",
+          "--master_memory",
+          "512",
+          "--master_vcores",
+          "2",
+          "--container_memory",
+          "128",
+          "--container_vcores",
+          "1",
+          "--shell_script",
+          "test.sh"
+      };
+      client.init(args);
+      Assert.fail("Exception is expected");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue("The throw exception is not expected",
+          e.getMessage().contains("Can not specify shell_command option " +
+              "and shell_script option at the same time"));
+    }
+
+    LOG.info("Initializing DS Client without --shell_command and --shell_script");
+    try {
+      String[] args = {
+          "--jar",
+          APPMASTER_JAR,
+          "--num_containers",
+          "2",
+          "--master_memory",
+          "512",
+          "--master_vcores",
+          "2",
+          "--container_memory",
+          "128",
+          "--container_vcores",
+          "1"
+      };
+      client.init(args);
+      Assert.fail("Exception is expected");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue("The throw exception is not expected",
+          e.getMessage().contains("No shell command or shell script specified " +
+              "to be executed by application master"));
+    }
   }
 
   protected static void waitForNMToRegister(NodeManager nm)
@@ -490,10 +590,10 @@ public class TestDistributedShell {
       for (File output : containerFiles[i].listFiles()) {
         if (output.getName().trim().contains("stdout")) {
           BufferedReader br = null;
+          List<String> stdOutContent = new ArrayList<String>();
           try {
 
             String sCurrentLine;
 
             br = new BufferedReader(new FileReader(output));
             int numOfline = 0;
             while ((sCurrentLine = br.readLine()) != null) {
@@ -502,12 +602,25 @@ public class TestDistributedShell {
                 numOfWords++;
               }
             } else if (output.getName().trim().equals("stdout")){
-              Assert.assertEquals("The current is" + sCurrentLine,
-                  expectedContent.get(numOfline), sCurrentLine.trim());
-              numOfline++;
+              if (! Shell.WINDOWS) {
+                Assert.assertEquals("The current is" + sCurrentLine,
+                    expectedContent.get(numOfline), sCurrentLine.trim());
+                numOfline++;
+              } else {
+                stdOutContent.add(sCurrentLine.trim());
+              }
             }
           }
+          /* By executing bat script using cmd /c,
+           * it will output all contents from bat script first
+           * It is hard for us to do check line by line
+           * Simply check whether output from bat file contains
+           * all the expected messages
+           */
+          if (Shell.WINDOWS && !count
+              && output.getName().trim().equals("stdout")) {
+            Assert.assertTrue(stdOutContent.containsAll(expectedContent));
+          }
         } catch (IOException e) {
           e.printStackTrace();
         } finally {
@@ -523,6 +636,5 @@ public class TestDistributedShell {
       }
       return numOfWords;
     }
-
   }
 
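Because cmd /c emits the whole bat output up front, the Windows branch of verifyContainerLog above collects stdout lines and checks containment instead of comparing line by line. The order-insensitive check reduced to a sketch:

import java.util.Arrays;
import java.util.List;

public class ContainmentCheck {
  // Order-insensitive containment: suitable when the platform batches
  // output and a line-by-line comparison would be brittle.
  static boolean containsAll(List<String> actual, List<String> expected) {
    return actual.containsAll(expected);
  }

  public static void main(String[] args) {
    List<String> got = Arrays.asList("banner", "testDSShellWithShellScript");
    System.out.println(containsAll(got,
        Arrays.asList("testDSShellWithShellScript"))); // true
  }
}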
@@ -53,6 +53,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.file.tfile.TFile;
@@ -294,7 +295,7 @@ public class AggregatedLogFormat {
       out.close();
     }
 
-    public void closeWriter() {
+    public void close() {
       try {
         this.writer.close();
       } catch (IOException e) {
@@ -569,9 +570,8 @@ public class AggregatedLogFormat {
       out.println("");
     }
 
-    public void close() throws IOException {
-      this.scanner.close();
-      this.fsDataIStream.close();
+    public void close() {
+      IOUtils.cleanup(LOG, scanner, reader, fsDataIStream);
     }
   }
 
@@ -59,109 +59,113 @@ public class AggregatedLogsBlock extends HtmlBlock {
 
   @Override
   protected void render(Block html) {
-    ContainerId containerId = verifyAndGetContainerId(html);
-    NodeId nodeId = verifyAndGetNodeId(html);
-    String appOwner = verifyAndGetAppOwner(html);
-    LogLimits logLimits = verifyAndGetLogLimits(html);
-    if (containerId == null || nodeId == null || appOwner == null
-        || appOwner.isEmpty() || logLimits == null) {
-      return;
-    }
-
-    ApplicationId applicationId =
-        containerId.getApplicationAttemptId().getApplicationId();
-    String logEntity = $(ENTITY_STRING);
-    if (logEntity == null || logEntity.isEmpty()) {
-      logEntity = containerId.toString();
-    }
-
-    if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
-        YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
-      html.h1()
-        ._("Aggregation is not enabled. Try the nodemanager at " + nodeId)
-        ._();
-      return;
-    }
-
-    Path remoteRootLogDir =
-        new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
-            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
     AggregatedLogFormat.LogReader reader = null;
     try {
-      reader =
-          new AggregatedLogFormat.LogReader(conf,
-              LogAggregationUtils.getRemoteNodeLogFileForApp(
-                  remoteRootLogDir, applicationId, appOwner, nodeId,
-                  LogAggregationUtils.getRemoteNodeLogDirSuffix(conf)));
-    } catch (FileNotFoundException e) {
-      // ACLs not available till the log file is opened.
-      html.h1()
-        ._("Logs not available for "
-            + logEntity
-            + ". Aggregation may not be complete, "
-            + "Check back later or try the nodemanager at "
-            + nodeId)._();
-      return;
-    } catch (IOException e) {
-      html.h1()._("Error getting logs for " + logEntity)._();
-      LOG.error("Error getting logs for " + logEntity, e);
-      return;
-    }
-
-    String owner = null;
-    Map<ApplicationAccessType, String> appAcls = null;
-    try {
-      owner = reader.getApplicationOwner();
-      appAcls = reader.getApplicationAcls();
-    } catch (IOException e) {
-      html.h1()._("Error getting logs for " + logEntity)._();
-      LOG.error("Error getting logs for " + logEntity, e);
-      return;
-    }
-    ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
-    aclsManager.addApplication(applicationId, appAcls);
-
-    String remoteUser = request().getRemoteUser();
-    UserGroupInformation callerUGI = null;
-    if (remoteUser != null) {
-      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
-    }
-    if (callerUGI != null
-        && !aclsManager.checkAccess(callerUGI, ApplicationAccessType.VIEW_APP,
-            owner, applicationId)) {
-      html.h1()
-        ._("User [" + remoteUser
-            + "] is not authorized to view the logs for " + logEntity)._();
-      return;
-    }
-
-    String desiredLogType = $(CONTAINER_LOG_TYPE);
-    try {
-      AggregatedLogFormat.ContainerLogsReader logReader =
-          reader.getContainerLogsReader(containerId);
-      if (logReader == null) {
-        html.h1()._(
-            "Logs not available for " + logEntity
-                + ". Could be caused by the rentention policy")._();
+      ContainerId containerId = verifyAndGetContainerId(html);
+      NodeId nodeId = verifyAndGetNodeId(html);
+      String appOwner = verifyAndGetAppOwner(html);
+      LogLimits logLimits = verifyAndGetLogLimits(html);
+      if (containerId == null || nodeId == null || appOwner == null
+          || appOwner.isEmpty() || logLimits == null) {
         return;
       }
 
-      boolean foundLog = readContainerLogs(html, logReader, logLimits,
-          desiredLogType);
+      ApplicationId applicationId = containerId.getApplicationAttemptId()
+          .getApplicationId();
+      String logEntity = $(ENTITY_STRING);
+      if (logEntity == null || logEntity.isEmpty()) {
+        logEntity = containerId.toString();
+      }
 
-      if (!foundLog) {
-        if (desiredLogType.isEmpty()) {
-          html.h1("No logs available for container " + containerId.toString());
-        } else {
-          html.h1("Unable to locate '" + desiredLogType
-              + "' log for container " + containerId.toString());
+      if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
+          YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
+        html.h1()
+            ._("Aggregation is not enabled. Try the nodemanager at " + nodeId)
+            ._();
+        return;
+      }
+
+      Path remoteRootLogDir = new Path(conf.get(
+          YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
+          YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
+
+      try {
+        reader = new AggregatedLogFormat.LogReader(conf,
+            LogAggregationUtils.getRemoteNodeLogFileForApp(remoteRootLogDir,
+                applicationId, appOwner, nodeId,
+                LogAggregationUtils.getRemoteNodeLogDirSuffix(conf)));
+      } catch (FileNotFoundException e) {
+        // ACLs not available till the log file is opened.
+        html.h1()
+            ._("Logs not available for " + logEntity
+                + ". Aggregation may not be complete, "
+                + "Check back later or try the nodemanager at " + nodeId)._();
+        return;
+      } catch (IOException e) {
+        html.h1()._("Error getting logs for " + logEntity)._();
+        LOG.error("Error getting logs for " + logEntity, e);
+        return;
+      }
+
+      String owner = null;
+      Map<ApplicationAccessType, String> appAcls = null;
+      try {
+        owner = reader.getApplicationOwner();
+        appAcls = reader.getApplicationAcls();
+      } catch (IOException e) {
+        html.h1()._("Error getting logs for " + logEntity)._();
+        LOG.error("Error getting logs for " + logEntity, e);
+        return;
+      }
+      ApplicationACLsManager aclsManager = new ApplicationACLsManager(conf);
+      aclsManager.addApplication(applicationId, appAcls);
+
+      String remoteUser = request().getRemoteUser();
+      UserGroupInformation callerUGI = null;
+      if (remoteUser != null) {
+        callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+      }
+      if (callerUGI != null
+          && !aclsManager.checkAccess(callerUGI,
+              ApplicationAccessType.VIEW_APP, owner, applicationId)) {
+        html.h1()
+            ._("User [" + remoteUser
+                + "] is not authorized to view the logs for " + logEntity)._();
+        return;
+      }
+
+      String desiredLogType = $(CONTAINER_LOG_TYPE);
+      try {
+        AggregatedLogFormat.ContainerLogsReader logReader = reader
+            .getContainerLogsReader(containerId);
+        if (logReader == null) {
+          html.h1()
+              ._("Logs not available for " + logEntity
                  + ". Could be caused by the rentention policy")._();
+          return;
         }
+
+        boolean foundLog = readContainerLogs(html, logReader, logLimits,
+            desiredLogType);
+
+        if (!foundLog) {
+          if (desiredLogType.isEmpty()) {
+            html.h1("No logs available for container " + containerId.toString());
+          } else {
+            html.h1("Unable to locate '" + desiredLogType
+                + "' log for container " + containerId.toString());
+          }
+          return;
+        }
+      } catch (IOException e) {
+        html.h1()._("Error getting logs for " + logEntity)._();
+        LOG.error("Error getting logs for " + logEntity, e);
         return;
       }
-    } catch (IOException e) {
-      html.h1()._("Error getting logs for " + logEntity)._();
-      LOG.error("Error getting logs for " + logEntity, e);
-      return;
+    } finally {
+      if (reader != null) {
+        reader.close();
+      }
     }
   }
 }
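The AggregatedLogsBlock rewrite above (YARN-1145) moves the whole render body into try/finally so the LogReader is closed on every early return. The shape of that fix, reduced to a sketch with hypothetical stand-in types:

import java.io.Closeable;
import java.util.function.Supplier;

public class RenderSketch {
  // Stand-in for AggregatedLogFormat.LogReader after the patch: its close()
  // no longer throws, so it can be called safely from finally.
  interface LogReader extends Closeable {
    @Override void close();
    String readAll();
  }

  static String render(boolean requestValid, Supplier<LogReader> open) {
    LogReader reader = null;
    try {
      if (!requestValid) {
        return "bad request"; // early exit: the finally block still runs
      }
      reader = open.get();
      return reader.readAll();
    } finally {
      if (reader != null) {
        reader.close(); // reached on every return path above
      }
    }
  }

  public static void main(String[] args) {
    LogReader fake = new LogReader() {
      public String readAll() { return "logs"; }
      public void close() { System.out.println("closed"); }
    };
    System.out.println(render(true, () -> fake));
  }
}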

@@ -39,6 +39,7 @@ public class TestHAUtil {
   private static final String RM1_ADDRESS_UNTRIMMED = "  \t\t\n 1.2.3.4:8021  \n\t ";
   private static final String RM1_ADDRESS = RM1_ADDRESS_UNTRIMMED.trim();
   private static final String RM2_ADDRESS = "localhost:8022";
+  private static final String RM3_ADDRESS = "localhost:8033";
   private static final String RM1_NODE_ID_UNTRIMMED = "rm1 ";
   private static final String RM1_NODE_ID = RM1_NODE_ID_UNTRIMMED.trim();
   private static final String RM2_NODE_ID = "rm2";
@@ -113,8 +114,13 @@ public class TestHAUtil {
     }
 
     conf.clear();
-    conf.set(YarnConfiguration.RM_HA_IDS, RM_INVALID_NODE_ID + ","
-        + RM1_NODE_ID);
+    // simulate the case YarnConfiguration.RM_HA_ID is not set
+    conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
+        + RM2_NODE_ID);
+    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+      conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
+      conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
+    }
     try {
       HAUtil.verifyAndSetConfiguration(conf);
     } catch (YarnRuntimeException e) {
@@ -165,6 +171,7 @@ public class TestHAUtil {
     for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
       conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
       conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
+      conf.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
     }
     try {
       HAUtil.verifyAndSetConfiguration(conf);

@@ -114,7 +114,7 @@ public class TestAggregatedLogFormat {
         testContainerId, ugi.getShortUserName());
 
     logWriter.append(logKey, logValue);
-    logWriter.closeWriter();
+    logWriter.close();
 
     // make sure permission are correct on the file
     FileStatus fsStatus = fs.getFileStatus(remoteAppLogFile);
@@ -194,7 +194,7 @@ public class TestAggregatedLogFormat {
         ugi.getShortUserName());
     logWriter.append(logKey, logValue);
 
-    logWriter.closeWriter();
+    logWriter.close();
 
     BufferedReader in =
         new BufferedReader(new FileReader(new File(remoteAppLogFile

@@ -229,7 +229,7 @@ public class TestAggregatedLogsBlock {
 
     writer.append(new AggregatedLogFormat.LogKey("container_0_0001_01_000001"),
         new AggregatedLogFormat.LogValue(rootLogDirs, containerId,UserGroupInformation.getCurrentUser().getShortUserName()));
-    writer.closeWriter();
+    writer.close();
   }
 
   private void writeLogs(String dirName) throws Exception {

@@ -178,7 +178,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
         localAppLogDirs);
 
     if (this.writer != null) {
-      this.writer.closeWriter();
+      this.writer.close();
       LOG.info("Finished aggregate log-file for app " + this.applicationId);
     }
 

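Renaming closeWriter() to close() across the call sites above means the writer now reads like any ordinary Closeable, and callers could scope it with try-with-resources. A sketch of what the uniform name enables (hypothetical miniature, not the Hadoop class):

public class CloseableWriterSketch {
  // With the method named close() (satisfying AutoCloseable), callers can
  // scope the writer with try-with-resources instead of remembering to
  // call closeWriter() on every path.
  static class LogWriter implements AutoCloseable {
    void append(String key, String value) {
      System.out.println(key + " -> " + value);
    }
    @Override
    public void close() {
      System.out.println("flushed and closed");
    }
  }

  public static void main(String[] args) {
    try (LogWriter w = new LogWriter()) {
      w.append("container_0_0001_01_000001", "log body");
    } // closed automatically, even if append throws
  }
}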
@@ -143,7 +143,9 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
     conf.set(YarnConfiguration.ZK_RM_STATE_STORE_ADDRESS, hostPort);
     conf.set(YarnConfiguration.RM_HA_ID, rmId);
     for (String rpcAddress : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
-      conf.set(HAUtil.addSuffix(rpcAddress, rmId), "localhost:0");
+      for (String id : HAUtil.getRMHAIds(conf)) {
+        conf.set(HAUtil.addSuffix(rpcAddress, id), "localhost:0");
+      }
     }
     conf.set(HAUtil.addSuffix(YarnConfiguration.RM_ADMIN_ADDRESS, rmId),
         "localhost:" + adminPort);

@@ -296,7 +296,9 @@ public class MiniYARNCluster extends CompositeService {
       String hostname = MiniYARNCluster.getHostname();
       conf.set(YarnConfiguration.RM_HA_ID, rmId);
       for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
-        conf.set(HAUtil.addSuffix(confKey, rmId), hostname + ":0");
+        for (String id : HAUtil.getRMHAIds(conf)) {
+          conf.set(HAUtil.addSuffix(confKey, id), hostname + ":0");
+        }
       }
     }
 

@@ -33,6 +33,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 
+import com.google.common.annotations.VisibleForTesting;
+
 public class WebAppProxy extends AbstractService {
   public static final String FETCHER_ATTRIBUTE= "AppUrlFetcher";
   public static final String IS_SECURITY_ENABLED_ATTRIBUTE = "IsSecurityEnabled";
@@ -126,4 +128,9 @@ public class WebAppProxy extends AbstractService {
       }
     }
   }
+
+  @VisibleForTesting
+  String getBindAddress() {
+    return bindAddress + ":" + port;
+  }
 }

@@ -77,7 +77,8 @@ public class WebAppProxyServer extends CompositeService {
     Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
     StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
     try {
-      WebAppProxyServer proxyServer = startServer();
+      YarnConfiguration configuration = new YarnConfiguration();
+      WebAppProxyServer proxyServer = startServer(configuration);
       proxyServer.proxy.join();
     } catch (Throwable t) {
       LOG.fatal("Error starting Proxy server", t);
@@ -90,12 +91,11 @@ public class WebAppProxyServer extends CompositeService {
    *
    * @return proxy server instance.
    */
-  protected static WebAppProxyServer startServer() throws Exception {
+  protected static WebAppProxyServer startServer(Configuration configuration)
+      throws Exception {
     WebAppProxyServer proxy = new WebAppProxyServer();
     ShutdownHookManager.get().addShutdownHook(
         new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
-    YarnConfiguration configuration = new YarnConfiguration();
-    configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9099");
     proxy.init(configuration);
     proxy.start();
     return proxy;

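Having main() build the YarnConfiguration and pass it into startServer(Configuration) is what lets the tests below inject their own proxy address instead of the server hardcoding localhost:9099. The injection shape in a dependency-free sketch (hypothetical key name):

import java.util.HashMap;
import java.util.Map;

public class InjectedConfigSketch {
  static class ProxyServer {
    final String bindAddress;
    ProxyServer(String bindAddress) { this.bindAddress = bindAddress; }
  }

  // The caller supplies the configuration, so a test can pick the bind
  // address; the server no longer constructs one internally.
  static ProxyServer startServer(Map<String, String> conf) {
    return new ProxyServer(
        conf.getOrDefault("yarn.web-proxy.address", "0.0.0.0:9099"));
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("yarn.web-proxy.address", "localhost:9099");
    System.out.println(startServer(conf).bindAddress);
  }
}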
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.webproxy;
 
 import static org.junit.Assert.assertEquals;
 
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
@@ -29,11 +30,12 @@ import org.junit.Test;
 
 public class TestWebAppProxyServer {
   private WebAppProxyServer webAppProxy = null;
+  private final String proxyAddress = "0.0.0.0:8888";
 
   @Before
   public void setUp() throws Exception {
     YarnConfiguration conf = new YarnConfiguration();
-    conf.set(YarnConfiguration.PROXY_ADDRESS, "0.0.0.0:8888");
+    conf.set(YarnConfiguration.PROXY_ADDRESS, proxyAddress);
     webAppProxy = new WebAppProxyServer();
     webAppProxy.init(conf);
   }
@@ -47,6 +49,11 @@ public class TestWebAppProxyServer {
   public void testStart() {
     assertEquals(STATE.INITED, webAppProxy.getServiceState());
     webAppProxy.start();
+    for (Service service : webAppProxy.getServices()) {
+      if (service instanceof WebAppProxy) {
+        assertEquals(((WebAppProxy) service).getBindAddress(), proxyAddress);
+      }
+    }
     assertEquals(STATE.STARTED, webAppProxy.getServiceState());
   }
 }

@@ -184,8 +184,10 @@ public class TestWebAppProxyServlet {
   @Test(timeout=5000)
   public void testWebAppProxyServerMainMethod() throws Exception {
     WebAppProxyServer mainServer = null;
+    Configuration conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9099");
     try {
-      mainServer = WebAppProxyServer.startServer();
+      mainServer = WebAppProxyServer.startServer(conf);
       int counter = 20;
 
       URL wrongUrl = new URL("http://localhost:9099/proxy/app");