HDFS-3107. Introduce truncate. Contributed by Plamen Jeliazkov.

Plamen Jeliazkov 2015-01-12 21:53:52 -08:00 committed by Konstantin V Shvachko
parent c4cba6165a
commit 7e9358feb3
32 changed files with 1102 additions and 204 deletions


@ -18,6 +18,8 @@ Trunk (Unreleased)
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
HDFS-3107. Introduce truncate. (Plamen Jeliazkov via shv)
IMPROVEMENTS
HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.


@ -1916,6 +1916,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
SnapshotAccessControlException.class);
}
}
/**
* Truncate a file to an indicated size.
* See {@link ClientProtocol#truncate(String, long, String)}.
*/
public boolean truncate(String src, long newLength) throws IOException {
checkOpen();
try {
return namenode.truncate(src, newLength, clientName);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
UnresolvedPathException.class);
}
}
/**
* Delete file or directory.
* See {@link ClientProtocol#delete(String, boolean)}.


@ -626,7 +626,20 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absDst);
}
}
/**
* Truncate the file in the indicated path to the indicated size.
* @param f The path to the file to be truncated
* @param newLength The size the file is to be truncated to
*
* @return true if the client does not need to wait for block recovery,
* false if the client needs to wait for block recovery.
*/
public boolean truncate(Path f, final long newLength) throws IOException {
statistics.incrementWriteOps(1);
return dfs.truncate(getPathName(f), newLength);
}
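As a usage illustration (not part of the patch): a minimal client-side sketch, assuming an HDFS default filesystem. The path and length are made up, and isFileClosed() is the pre-existing HDFS-4525 API, used here only to wait out block recovery when truncate() returns false.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TruncateUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path p = new Path("/data/events.log");  // hypothetical file
    boolean done = dfs.truncate(p, 4096L);  // true only on a block boundary
    while (!done) {                         // mid-block: wait for recovery
      Thread.sleep(100);
      done = dfs.isFileClosed(p);           // HDFS-4525
    }
  }
}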
@Override
public boolean delete(Path f, final boolean recursive) throws IOException {
statistics.incrementWriteOps(1);


@ -521,7 +521,37 @@ public interface ClientProtocol {
FileAlreadyExistsException, FileNotFoundException,
NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
UnresolvedLinkException, SnapshotAccessControlException, IOException;
/**
* Truncate file src to new size.
* <ul>
* <li>Fails if src is a directory.
* <li>Fails if src does not exist.
* <li>Fails if src is not closed.
* <li>Fails if new size is greater than current size.
* </ul>
* <p>
* This implementation of truncate is purely a namespace operation if truncate
* occurs at a block boundary; otherwise it requires DataNode block recovery.
* <p>
* @param src existing file
* @param newLength the target size
*
* @return true if the client does not need to wait for block recovery,
* false if the client needs to wait for block recovery.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>src</code> is not found
* @throws SafeModeException If truncate is not allowed in safe mode
* @throws UnresolvedLinkException If <code>src</code> contains a symlink
* @throws SnapshotAccessControlException If the path is in a read-only snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
public boolean truncate(String src, long newLength, String clientName)
throws AccessControlException, FileNotFoundException, SafeModeException,
UnresolvedLinkException, SnapshotAccessControlException, IOException;
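A hedged sketch of handling the failure modes listed above from the caller's side; the ClientProtocol proxy, path, length, and client name are all invented for illustration.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.security.AccessControlException;

class TruncateCaller {
  static boolean truncateExample(ClientProtocol namenode) {
    try {
      // true: done immediately; false: last-block recovery was scheduled
      return namenode.truncate("/logs/app.log", 4096L, "DFSClient_example");
    } catch (FileNotFoundException e) {
      System.err.println("no such file: " + e.getMessage());
    } catch (AccessControlException e) {
      System.err.println("permission denied: " + e.getMessage());
    } catch (IOException e) {
      // directory, still-open file, safe mode, or newLength > current size
      System.err.println("truncate rejected: " + e.getMessage());
    }
    return false;
  }
}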
/**
* Delete the given file or directory from the file system.
* <p>


@ -181,6 +181,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSto
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
@ -584,6 +586,18 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
return VOID_RENAME2_RESPONSE;
}
@Override
public TruncateResponseProto truncate(RpcController controller,
TruncateRequestProto req) throws ServiceException {
try {
boolean result = server.truncate(req.getSrc(), req.getNewLength(),
req.getClientName());
return TruncateResponseProto.newBuilder().setResult(result).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public DeleteResponseProto delete(RpcController controller,
DeleteRequestProto req) throws ServiceException {


@ -155,6 +155,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuo
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
@ -301,6 +302,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public boolean truncate(String src, long newLength, String clientName)
throws IOException, UnresolvedLinkException {
TruncateRequestProto req = TruncateRequestProto.newBuilder()
.setSrc(src)
.setNewLength(newLength)
.setClientName(clientName)
.build();
try {
return rpcProxy.truncate(null, req).getResult();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public LastBlockWithStatus append(String src, String clientName)
throws AccessControlException, DSQuotaExceededException,


@ -608,13 +608,15 @@ public class PBHelper {
}
LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b);
return RecoveringBlockProto.newBuilder().setBlock(lb)
.setNewGenStamp(b.getNewGenerationStamp()).build();
.setNewGenStamp(b.getNewGenerationStamp())
.setTruncateFlag(b.getTruncateFlag()).build();
}
public static RecoveringBlock convert(RecoveringBlockProto b) {
ExtendedBlock block = convert(b.getBlock().getB());
DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
return new RecoveringBlock(block, locs, b.getNewGenStamp());
return new RecoveringBlock(block, locs, b.getNewGenStamp(),
b.getTruncateFlag());
}
public static DatanodeInfoProto.AdminState convert(


@ -273,7 +273,11 @@ public class BlockInfoUnderConstruction extends BlockInfo {
* make it primary.
*/
public void initializeBlockRecovery(long recoveryId) {
setBlockUCState(BlockUCState.UNDER_RECOVERY);
initializeBlockRecovery(BlockUCState.UNDER_RECOVERY, recoveryId);
}
public void initializeBlockRecovery(BlockUCState s, long recoveryId) {
setBlockUCState(s);
blockRecoveryId = recoveryId;
if (replicas.size() == 0) {
NameNode.blockStateChangeLog.warn("BLOCK*"


@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -1439,10 +1440,12 @@ public class DatanodeManager {
LOG.info("Skipped stale nodes for recovery : " +
(storages.length - recoveryLocations.size()));
}
boolean isTruncate = b.getBlockUCState().equals(
HdfsServerConstants.BlockUCState.BEING_TRUNCATED);
brCommand.add(new RecoveringBlock(
new ExtendedBlock(blockPoolId, b),
DatanodeStorageInfo.toDatanodeInfos(recoveryLocations),
b.getBlockRecoveryId()));
b.getBlockRecoveryId(), isTruncate));
} else {
// If too many replicas are stale, then choose all replicas to participate
// in block recovery.


@ -240,7 +240,7 @@ public class DatanodeStorageInfo {
return result;
}
boolean removeBlock(BlockInfo b) {
public boolean removeBlock(BlockInfo b) {
blockList = b.listRemove(blockList, this);
if (b.removeStorage(this)) {
numBlocks--;


@ -299,6 +299,13 @@ public final class HdfsServerConstants {
* which synchronizes the existing replicas contents.
*/
UNDER_RECOVERY,
/**
* The block is being truncated.<br>
* When a file is truncated, its last block may need to be truncated as well
* and must go through a recovery procedure,
* which synchronizes the contents of the existing replicas.
*/
BEING_TRUNCATED,
/**
* The block is committed.<br>
* The client reported that all bytes are written to data-nodes


@ -2691,7 +2691,10 @@ public class DataNode extends ReconfigurableBase
r.rInfo.getNumBytes() == finalizedLength)
participatingList.add(r);
}
newBlock.setNumBytes(finalizedLength);
if(rBlock.getTruncateFlag())
newBlock.setNumBytes(rBlock.getBlock().getNumBytes());
else
newBlock.setNumBytes(finalizedLength);
break;
case RBW:
case RWR:
@ -2703,7 +2706,10 @@ public class DataNode extends ReconfigurableBase
participatingList.add(r);
}
}
newBlock.setNumBytes(minLength);
if(rBlock.getTruncateFlag())
newBlock.setNumBytes(rBlock.getBlock().getNumBytes());
else
newBlock.setNumBytes(minLength);
break;
case RUR:
case TEMPORARY:


@ -1087,7 +1087,71 @@ public class FSDirectory implements Closeable {
public INodeMap getINodeMap() {
return inodeMap;
}
/**
* Truncate implementation used when replaying edits in FSEditLogLoader.
* Unlike FSNamesystem.truncate, this will not schedule block recovery.
*/
void unprotectedTruncate(String src, String clientName, String clientMachine,
long newLength, long mtime)
throws UnresolvedLinkException, QuotaExceededException,
SnapshotAccessControlException, IOException {
INodesInPath iip = getINodesInPath(src, true);
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
boolean onBlockBoundary =
unprotectedTruncate(iip, newLength, collectedBlocks, mtime);
if(! onBlockBoundary) {
getFSNamesystem().prepareFileForWrite(src,
iip, clientName, clientMachine, false, false);
}
getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
boolean truncate(INodesInPath iip, long newLength,
BlocksMapUpdateInfo collectedBlocks,
long mtime)
throws IOException {
writeLock();
try {
return unprotectedTruncate(iip, newLength, collectedBlocks, mtime);
} finally {
writeUnlock();
}
}
/**
* Truncate has the following properties:
* 1.) Any block deletions occur now.
* 2.) INode length is truncated; clients can now only read up to the new length.
* 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY.
* 4.) NN will trigger DN truncation recovery and wait for DNs to report.
* 5.) File is considered UNDER_RECOVERY until truncation recovery completes.
* 6.) Soft and hard lease expiration require truncation recovery to complete.
*
* @return true if on the block boundary, or false if recovery is needed
*/
boolean unprotectedTruncate(INodesInPath iip, long newLength,
BlocksMapUpdateInfo collectedBlocks,
long mtime) throws IOException {
assert hasWriteLock();
INodeFile file = iip.getLastINode().asFile();
long oldDiskspace = file.diskspaceConsumed();
long remainingLength =
file.collectBlocksBeyondMax(newLength, collectedBlocks);
file.setModificationTime(mtime);
updateCount(iip, 0, file.diskspaceConsumed() - oldDiskspace, true);
// If on block boundary, then return
long lastBlockDelta = remainingLength - newLength;
if(lastBlockDelta == 0)
return true;
// Set new last block length
BlockInfo lastBlock = file.getLastBlock();
assert lastBlock.getNumBytes() - lastBlockDelta > 0 : "wrong block size";
lastBlock.setNumBytes(lastBlock.getNumBytes() - lastBlockDelta);
return false;
}
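A worked example of the arithmetic above, with made-up sizes: a file of three 512-byte blocks truncated to 600 bytes keeps the first two blocks (1024 is the first prefix sum exceeding 600), collects the third for deletion, and shrinks the last kept block by lastBlockDelta = 1024 - 600 = 424 bytes. A delta of 0 would mean a pure namespace truncate with no recovery.

/** Standalone sketch of the truncate length arithmetic (hypothetical sizes). */
public class TruncateMath {
  public static void main(String[] args) {
    long[] blocks = {512, 512, 512};  // hypothetical block lengths
    long newLength = 600;             // hypothetical truncate target
    // find the minimum n such that the size of the first n blocks > newLength
    int n = 0;
    long size = 0;
    for (; n < blocks.length && newLength > size; n++) {
      size += blocks[n];              // 512, then 1024
    }
    // blocks[n..] (here only blocks[2]) are collected for deletion
    long remainingLength = size;                        // 1024
    long lastBlockDelta = remainingLength - newLength;  // 424
    long newLastLen = blocks[n - 1] - lastBlockDelta;   // 512 - 424 = 88
    System.out.println("delete " + (blocks.length - n) + " block(s); "
        + "shrink last block to " + newLastLen + " bytes");
  }
}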
/**
* This method is always called with writeLock of FSDirectory held.
*/


@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
@ -896,6 +897,20 @@ public class FSEditLog implements LogsPurgeable {
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add a truncate file record to the edit log.
*/
void logTruncate(String src, String clientName, String clientMachine,
long size, long timestamp) {
TruncateOp op = TruncateOp.getInstance(cache.get())
.setPath(src)
.setClientName(clientName)
.setClientMachine(clientMachine)
.setNewLength(size)
.setTimestamp(timestamp);
logEdit(op);
}
/**
* Add legacy block generation stamp record to edit log


@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade;
import static org.apache.hadoop.util.Time.now;
@ -853,6 +854,12 @@ public class FSEditLogLoader {
}
break;
}
case OP_TRUNCATE: {
TruncateOp truncateOp = (TruncateOp) op;
fsDir.unprotectedTruncate(truncateOp.src, truncateOp.clientName,
truncateOp.clientMachine, truncateOp.newLength, truncateOp.timestamp);
break;
}
case OP_SET_STORAGE_POLICY: {
SetStoragePolicyOp setStoragePolicyOp = (SetStoragePolicyOp) op;
final String path = renameReservedPathsOnUpgrade(setStoragePolicyOp.path,


@ -59,6 +59,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XAT
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TRUNCATE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_UPDATE_BLOCKS;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_UPDATE_MASTER_KEY;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_STORAGE_POLICY;
@ -180,6 +181,7 @@ public abstract class FSEditLogOp {
inst.put(OP_START_LOG_SEGMENT, new LogSegmentOp(OP_START_LOG_SEGMENT));
inst.put(OP_END_LOG_SEGMENT, new LogSegmentOp(OP_END_LOG_SEGMENT));
inst.put(OP_UPDATE_BLOCKS, new UpdateBlocksOp());
inst.put(OP_TRUNCATE, new TruncateOp());
inst.put(OP_ALLOW_SNAPSHOT, new AllowSnapshotOp());
inst.put(OP_DISALLOW_SNAPSHOT, new DisallowSnapshotOp());
@ -2602,6 +2604,115 @@ public abstract class FSEditLogOp {
readRpcIdsFromXml(st);
}
}
static class TruncateOp extends FSEditLogOp {
String src;
String clientName;
String clientMachine;
long newLength;
long timestamp;
private TruncateOp() {
super(OP_TRUNCATE);
}
static TruncateOp getInstance(OpInstanceCache cache) {
return (TruncateOp)cache.get(OP_TRUNCATE);
}
@Override
void resetSubFields() {
src = null;
clientName = null;
clientMachine = null;
newLength = 0L;
timestamp = 0L;
}
TruncateOp setPath(String src) {
this.src = src;
return this;
}
TruncateOp setClientName(String clientName) {
this.clientName = clientName;
return this;
}
TruncateOp setClientMachine(String clientMachine) {
this.clientMachine = clientMachine;
return this;
}
TruncateOp setNewLength(long newLength) {
this.newLength = newLength;
return this;
}
TruncateOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
src = FSImageSerialization.readString(in);
clientName = FSImageSerialization.readString(in);
clientMachine = FSImageSerialization.readString(in);
newLength = FSImageSerialization.readLong(in);
timestamp = FSImageSerialization.readLong(in);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeString(clientName, out);
FSImageSerialization.writeString(clientMachine, out);
FSImageSerialization.writeLong(newLength, out);
FSImageSerialization.writeLong(timestamp, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "CLIENTNAME", clientName);
XMLUtils.addSaxString(contentHandler, "CLIENTMACHINE", clientMachine);
XMLUtils.addSaxString(contentHandler, "NEWLENGTH",
Long.toString(newLength));
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.clientName = st.getValue("CLIENTNAME");
this.clientMachine = st.getValue("CLIENTMACHINE");
this.newLength = Long.parseLong(st.getValue("NEWLENGTH"));
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("TruncateOp [src=");
builder.append(src);
builder.append(", clientName=");
builder.append(clientName);
builder.append(", clientMachine=");
builder.append(clientMachine);
builder.append(", newLength=");
builder.append(newLength);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
/**
* {@literal @Idempotent} for {@link ClientProtocol#recoverLease}. In the


@ -73,6 +73,7 @@ public enum FSEditLogOpCodes {
OP_SET_XATTR ((byte) 43),
OP_REMOVE_XATTR ((byte) 44),
OP_SET_STORAGE_POLICY ((byte) 45),
OP_TRUNCATE ((byte) 46),
// Note that the current range of the valid OP code is 0~127
OP_INVALID ((byte) -1);


@ -1906,6 +1906,114 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
logAuditEvent(true, "setTimes", src, null, auditStat);
}
/**
* Truncate file to a lower length.
* Truncate cannot be reverted or recovered from, as it causes data loss.
* Truncation at a block boundary is atomic; otherwise it requires
* block recovery to truncate the last block of the file.
*
* @return true if the client does not need to wait for block recovery,
* false if the client needs to wait for block recovery.
*/
boolean truncate(String src, long newLength,
String clientName, String clientMachine,
long mtime)
throws IOException, UnresolvedLinkException {
boolean ret;
try {
ret = truncateInt(src, newLength, clientName, clientMachine, mtime);
} catch (AccessControlException e) {
logAuditEvent(false, "truncate", src);
throw e;
}
return ret;
}
boolean truncateInt(String srcArg, long newLength,
String clientName, String clientMachine,
long mtime)
throws IOException, UnresolvedLinkException {
String src = srcArg;
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
+ src + " newLength=" + newLength);
}
HdfsFileStatus stat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
boolean res;
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot truncate for " + src);
src = dir.resolvePath(pc, src, pathComponents);
res = truncateInternal(src, newLength, clientName,
clientMachine, mtime, pc);
stat = FSDirStatAndListingOp.getFileInfo(dir, src, false,
FSDirectory.isReservedRawName(src), true);
} finally {
writeUnlock();
}
getEditLog().logSync();
logAuditEvent(true, "truncate", src, null, stat);
return res;
}
/**
* Truncate a file to a given size.
* Update the count at each ancestor directory with quota.
*/
boolean truncateInternal(String src, long newLength,
String clientName, String clientMachine,
long mtime, FSPermissionChecker pc)
throws IOException, UnresolvedLinkException {
assert hasWriteLock();
INodesInPath iip = dir.getINodesInPath4Write(src, true);
if (isPermissionEnabled) {
dir.checkPathAccess(pc, iip, FsAction.WRITE);
}
INodeFile file = iip.getLastINode().asFile();
// Data will be lost after truncate occurs so it cannot support snapshots.
if(file.isInLatestSnapshot(iip.getLatestSnapshotId()))
throw new HadoopIllegalArgumentException(
"Cannot truncate file with snapshot.");
// Opening an existing file for write. May need lease recovery.
recoverLeaseInternal(iip, src, clientName, clientMachine, false);
// Refresh INode as the file could have been closed
iip = dir.getINodesInPath4Write(src, true);
file = INodeFile.valueOf(iip.getLastINode(), src);
// Truncate length check.
long oldLength = file.computeFileSize();
if(oldLength == newLength)
return true;
if(oldLength < newLength)
throw new HadoopIllegalArgumentException(
"Cannot truncate to a larger file size. Current size: " + oldLength +
", truncate size: " + newLength + ".");
// Perform INodeFile truncation.
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
boolean onBlockBoundary = dir.truncate(iip, newLength,
collectedBlocks, mtime);
if(! onBlockBoundary) {
// Open file for write, but don't log into edits
prepareFileForWrite(src, iip, clientName, clientMachine, false, false);
file = INodeFile.valueOf(dir.getINode4Write(src), src);
initializeBlockRecovery(file);
}
getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime);
removeBlocks(collectedBlocks);
return onBlockBoundary;
}
void initializeBlockRecovery(INodeFile inodeFile) throws IOException {
BlockInfo lastBlock = inodeFile.getLastBlock();
long recoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(lastBlock));
((BlockInfoUnderConstruction)lastBlock).initializeBlockRecovery(
BlockUCState.BEING_TRUNCATED, recoveryId);
}
/**
* Create a symbolic link.
*/
@ -2615,7 +2723,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} else {
final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
&& (lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY ||
lastBlock.getBlockUCState() == BlockUCState.BEING_TRUNCATED)) {
throw new RecoveryInProgressException("Recovery in progress, file ["
+ src + "], " + "lease owner [" + lease.getHolder() + "]");
} else {
@ -3833,6 +3942,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
throw new AlreadyBeingCreatedException(message);
case UNDER_CONSTRUCTION:
case UNDER_RECOVERY:
case BEING_TRUNCATED:
final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)lastBlock;
// setup the last block locations from the blockManager if not known
if (uc.getNumExpectedLocations() == 0) {
@ -3854,7 +3964,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// start recovery of the last block for this file
long blockRecoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(uc));
lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
uc.initializeBlockRecovery(blockRecoveryId);
if (uc.getBlockUCState() != BlockUCState.BEING_TRUNCATED) {
uc.initializeBlockRecovery(blockRecoveryId);
}
leaseManager.renewLease(lease);
// Cannot close file right now, since the last block requires recovery.
// This may potentially cause infinite loop in lease recovery


@ -696,4 +696,43 @@ public class INodeFile extends INodeWithAdditionalFields
out.print(blocks == null || blocks.length == 0? null: blocks[0]);
out.println();
}
/**
* Remove full blocks at the end of the file up to newLength.
* @return sum of sizes of the remaining blocks
*/
public long collectBlocksBeyondMax(final long max,
final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] oldBlocks = getBlocks();
if (oldBlocks == null)
return 0;
//find the minimum n such that the size of the first n blocks > max
int n = 0;
long size = 0;
for(; n < oldBlocks.length && max > size; n++) {
size += oldBlocks[n].getNumBytes();
}
if (n >= oldBlocks.length)
return size;
// starting from block n, the data is beyond max.
// resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = BlockInfo.EMPTY_ARRAY;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
}
// set new blocks
setBlocks(newBlocks);
// collect the blocks beyond max
if (collectedBlocks != null) {
for(; n < oldBlocks.length; n++) {
collectedBlocks.addDeleteBlock(oldBlocks[n]);
}
}
return size;
}
}


@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH;
import static org.apache.hadoop.util.Time.now;
import java.io.FileNotFoundException;
import java.io.IOException;
@ -882,6 +883,22 @@ class NameNodeRpcServer implements NamenodeProtocols {
metrics.incrFilesRenamed();
}
@Override // ClientProtocol
public boolean truncate(String src, long newLength, String clientName)
throws IOException {
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.truncate: " + src + " to " +
newLength);
}
String clientMachine = getClientMachine();
try {
return namesystem.truncate(
src, newLength, clientName, clientMachine, now());
} finally {
metrics.incrFilesTruncated();
}
}
@Override // ClientProtocol
public boolean delete(String src, boolean recursive) throws IOException {
if (stateChangeLog.isDebugEnabled()) {


@ -47,6 +47,7 @@ public class NameNodeMetrics {
@Metric MutableCounterLong filesAppended;
@Metric MutableCounterLong getBlockLocations;
@Metric MutableCounterLong filesRenamed;
@Metric MutableCounterLong filesTruncated;
@Metric MutableCounterLong getListingOps;
@Metric MutableCounterLong deleteFileOps;
@Metric("Number of files/dirs deleted by delete or rename operations")
@ -173,6 +174,10 @@ public class NameNodeMetrics {
filesRenamed.incr();
}
public void incrFilesTruncated() {
filesTruncated.incr();
}
public void incrFilesDeleted(long delta) {
filesDeleted.incr(delta);
}


@ -21,7 +21,6 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@ -182,40 +181,6 @@ public class FileWithSnapshotFeature implements INode.Feature {
max = file.computeFileSize();
}
collectBlocksBeyondMax(file, max, info);
}
private void collectBlocksBeyondMax(final INodeFile file, final long max,
final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] oldBlocks = file.getBlocks();
if (oldBlocks != null) {
//find the minimum n such that the size of the first n blocks > max
int n = 0;
for(long size = 0; n < oldBlocks.length && max > size; n++) {
size += oldBlocks[n].getNumBytes();
}
// starting from block n, the data is beyond max.
if (n < oldBlocks.length) {
// resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = BlockInfo.EMPTY_ARRAY;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
}
// set new blocks
file.setBlocks(newBlocks);
// collect the blocks beyond max.
if (collectedBlocks != null) {
for(; n < oldBlocks.length; n++) {
collectedBlocks.addDeleteBlock(oldBlocks[n]);
}
}
}
}
file.collectBlocksBeyondMax(max, info);
}
}


@ -53,6 +53,7 @@ public class BlockRecoveryCommand extends DatanodeCommand {
@InterfaceAudience.Private
@InterfaceStability.Evolving
public static class RecoveringBlock extends LocatedBlock {
private boolean truncate;
private final long newGenerationStamp;
/**
@ -63,6 +64,15 @@ public class BlockRecoveryCommand extends DatanodeCommand {
this.newGenerationStamp = newGS;
}
/**
* RecoveringBlock with truncate option.
*/
public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS,
boolean truncate) {
this(b, locs, newGS);
this.truncate = truncate;
}
/**
* Return the new generation stamp of the block,
* which also plays role of the recovery id.
@ -70,6 +80,13 @@ public class BlockRecoveryCommand extends DatanodeCommand {
public long getNewGenerationStamp() {
return newGenerationStamp;
}
/**
* Return whether to truncate the block to the ExtendedBlock's length.
*/
public boolean getTruncateFlag() {
return truncate;
}
}
/**


@ -198,6 +198,16 @@ message ConcatRequestProto {
message ConcatResponseProto { // void response
}
message TruncateRequestProto {
required string src = 1;
required uint64 newLength = 2;
required string clientName = 3;
}
message TruncateResponseProto {
required bool result = 1;
}
message RenameRequestProto {
required string src = 1;
required string dst = 2;
@ -722,6 +732,7 @@ service ClientNamenodeProtocol {
rpc reportBadBlocks(ReportBadBlocksRequestProto)
returns(ReportBadBlocksResponseProto);
rpc concat(ConcatRequestProto) returns(ConcatResponseProto);
rpc truncate(TruncateRequestProto) returns(TruncateResponseProto);
rpc rename(RenameRequestProto) returns(RenameResponseProto);
rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto);
rpc delete(DeleteRequestProto) returns(DeleteResponseProto);


@ -556,6 +556,7 @@ enum ReplicaStateProto {
message RecoveringBlockProto {
required uint64 newGenStamp = 1; // New genstamp post recovery
required LocatedBlockProto block = 2; // Block to be recovered
optional bool truncateFlag = 3; // Block needs to be truncated
}
/**


@ -1194,7 +1194,13 @@ public class DFSTestUtil {
DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication,
seed);
filesystem.concat(pathConcatTarget, pathConcatFiles);
// OP_TRUNCATE 46
length = blockSize * 2;
DFSTestUtil.createFile(filesystem, pathFileCreate, length, replication,
seed);
filesystem.truncate(pathFileCreate, blockSize);
// OP_SYMLINK 17
Path pathSymlink = new Path("/file_symlink");
fc.createSymlink(pathConcatTarget, pathSymlink, false);


@ -71,7 +71,7 @@ public class TestDFSInotifyEventInputStream {
*/
@Test
public void testOpcodeCount() {
Assert.assertEquals(47, FSEditLogOpCodes.values().length);
Assert.assertEquals(48, FSEditLogOpCodes.values().length);
}


@ -0,0 +1,289 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFileTruncate {
static {
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
}
static final int BLOCK_SIZE = 4;
static final short REPLICATION = 3;
static final int DATANODE_NUM = 3;
static final int SUCCESS_ATTEMPTS = 300;
static final int RECOVERY_ATTEMPTS = 600;
static final long SLEEP = 100L;
static final long LOW_SOFTLIMIT = 100L;
static final long LOW_HARDLIMIT = 200L;
static final int SHORT_HEARTBEAT = 1;
static Configuration conf;
static MiniDFSCluster cluster;
static DistributedFileSystem fs;
@BeforeClass
public static void startUp() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
cluster = new MiniDFSCluster.Builder(conf)
.format(true)
.numDataNodes(DATANODE_NUM)
.nameNodePort(NameNode.DEFAULT_PORT)
.waitSafeMode(true)
.build();
fs = cluster.getFileSystem();
}
@AfterClass
public static void tearDown() throws IOException {
if(fs != null) fs.close();
if(cluster != null) cluster.shutdown();
}
/**
* Truncate files of different sizes byte by byte.
*/
@Test
public void testBasicTruncate() throws IOException {
int startingFileSize = 3 * BLOCK_SIZE;
Path parent = new Path("/test");
fs.mkdirs(parent);
fs.setQuota(parent, 100, 1000);
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
for (int fileLength = startingFileSize; fileLength > 0;
fileLength -= BLOCK_SIZE - 1) {
for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
final Path p = new Path(parent, "testBasicTruncate" + fileLength);
writeContents(contents, fileLength, p);
int newLength = fileLength - toTruncate;
boolean isReady = fs.truncate(p, newLength);
if(!isReady)
checkBlockRecovery(p);
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
ContentSummary cs = fs.getContentSummary(parent);
assertEquals("Bad disk space usage",
cs.getSpaceConsumed(), newLength * REPLICATION);
// validate the file content
AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
}
}
fs.delete(parent, true);
}
/**
* Failure / recovery test for truncate.
* In this scenario the DNs fail to recover the blocks, so the NN triggers
* lease recovery.
* The file stays in RecoveryInProgress until the DataNodes report recovery.
*/
@Test
public void testTruncateFailure() throws IOException {
int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
int toTruncate = 1;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path p = new Path("/testTruncateFailure");
FSDataOutputStream out = fs.create(p, false, BLOCK_SIZE, REPLICATION,
BLOCK_SIZE);
out.write(contents, 0, startingFileSize);
try {
fs.truncate(p, 0);
fail("Truncate must fail on open file.");
} catch(IOException expected) {}
out.close();
cluster.shutdownDataNodes();
NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
.setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);
int newLength = startingFileSize - toTruncate;
boolean isReady = fs.truncate(p, newLength);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
boolean recoveryTriggered = false;
for(int i = 0; i < RECOVERY_ATTEMPTS; i++) {
String leaseHolder =
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(),
p.toUri().getPath());
if(leaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
cluster.startDataNodes(conf, DATANODE_NUM, true,
HdfsServerConstants.StartupOption.REGULAR, null);
recoveryTriggered = true;
break;
}
try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {}
}
assertThat("lease recovery should have occurred in ~" +
SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true));
checkBlockRecovery(p);
NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsConstants.LEASE_HARDLIMIT_PERIOD);
fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
fs.delete(p, false);
}
/**
* EditLogOp load test for Truncate.
*/
@Test
public void testTruncateEditLogLoad() throws IOException {
int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
int toTruncate = 1;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path p = new Path("/testTruncateEditLogLoad");
writeContents(contents, startingFileSize, p);
int newLength = startingFileSize - toTruncate;
boolean isReady = fs.truncate(p, newLength);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
checkBlockRecovery(p);
cluster.restartNameNode();
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
fs.delete(p, false);
}
/**
* Check truncate recovery.
*/
@Test
public void testTruncateLastBlock() throws IOException {
FSNamesystem fsn = cluster.getNamesystem();
String src = "/file";
Path srcPath = new Path(src);
byte[] contents = AppendTestUtil.initBuffer(BLOCK_SIZE);
writeContents(contents, BLOCK_SIZE, srcPath);
INodeFile inode = fsn.getFSDirectory().getINode(src).asFile();
long oldGenstamp = GenerationStamp.LAST_RESERVED_STAMP;
DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo(
dn.getDatanodeUuid(), InetAddress.getLocalHost().getHostAddress());
dn.isAlive = true;
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
new Block(0, 1, oldGenstamp), (short) 1,
HdfsServerConstants.BlockUCState.BEING_TRUNCATED,
new DatanodeStorageInfo[] {storage});
inode.setBlocks(new BlockInfo[] {blockInfo});
fsn.writeLock();
try {
fsn.initializeBlockRecovery(inode);
assertThat(inode.getLastBlock().getBlockUCState(),
is(HdfsServerConstants.BlockUCState.BEING_TRUNCATED));
long blockRecoveryId = ((BlockInfoUnderConstruction) inode.getLastBlock())
.getBlockRecoveryId();
assertThat(blockRecoveryId, is(oldGenstamp + 2));
} finally {
fsn.writeUnlock();
}
}
static void writeContents(byte[] contents, int fileLength, Path p)
throws IOException {
FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
BLOCK_SIZE);
out.write(contents, 0, fileLength);
out.close();
}
static void checkBlockRecovery(Path p) throws IOException {
boolean success = false;
for(int i = 0; i < SUCCESS_ATTEMPTS; i++) {
LocatedBlocks blocks = getLocatedBlocks(p);
boolean noLastBlock = blocks.getLastLocatedBlock() == null;
if(!blocks.isUnderConstruction() &&
(noLastBlock || blocks.isLastBlockComplete())) {
success = true;
break;
}
try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {}
}
assertThat("inode should complete in ~" + SLEEP * SUCCESS_ATTEMPTS + " ms.",
success, is(true));
}
static LocatedBlocks getLocatedBlocks(Path src) throws IOException {
return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
}
}


@ -409,7 +409,7 @@ public class TestNamenodeRetryCache {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
assertEquals(23, cacheSet.size());
assertEquals(24, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@ -428,7 +428,7 @@ public class TestNamenodeRetryCache {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
assertEquals(23, cacheSet.size());
assertEquals(24, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();


@ -163,7 +163,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn0 = cluster.getNamesystem(0);
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
assertEquals(23, cacheSet.size());
assertEquals(24, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@ -184,7 +184,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn1 = cluster.getNamesystem(1);
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
.getRetryCache().getCacheSet();
assertEquals(23, cacheSet.size());
assertEquals(24, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();


@ -13,8 +13,8 @@
<TXID>2</TXID>
<DELEGATION_KEY>
<KEY_ID>1</KEY_ID>
<EXPIRY_DATE>1412805665311</EXPIRY_DATE>
<KEY>c1cad1109e33ae77</KEY>
<EXPIRY_DATE>1421822547136</EXPIRY_DATE>
<KEY>24319c7d1f7c0828</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@ -24,8 +24,8 @@
<TXID>3</TXID>
<DELEGATION_KEY>
<KEY_ID>2</KEY_ID>
<EXPIRY_DATE>1412805665314</EXPIRY_DATE>
<KEY>0632068587d6574c</KEY>
<EXPIRY_DATE>1421822547140</EXPIRY_DATE>
<KEY>254b1207021431f4</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@ -37,19 +37,19 @@
<INODEID>16386</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114467969</MTIME>
<ATIME>1412114467969</ATIME>
<MTIME>1421131348286</MTIME>
<ATIME>1421131348286</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_1474796918_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>13</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>6</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -60,14 +60,14 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468019</MTIME>
<ATIME>1412114467969</ATIME>
<MTIME>1421131348328</MTIME>
<ATIME>1421131348286</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<OVERWRITE>false</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -78,7 +78,7 @@
<DATA>
<TXID>6</TXID>
<PATH>/file_create</PATH>
<POLICYID>12</POLICYID>
<POLICYID>7</POLICYID>
</DATA>
</RECORD>
<RECORD>
@ -88,9 +88,9 @@
<LENGTH>0</LENGTH>
<SRC>/file_create</SRC>
<DST>/file_moved</DST>
<TIMESTAMP>1412114468027</TIMESTAMP>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>16</RPC_CALLID>
<TIMESTAMP>1421131348343</TIMESTAMP>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>9</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -99,9 +99,9 @@
<TXID>8</TXID>
<LENGTH>0</LENGTH>
<PATH>/file_moved</PATH>
<TIMESTAMP>1412114468034</TIMESTAMP>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>17</RPC_CALLID>
<TIMESTAMP>1421131348353</TIMESTAMP>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>10</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -111,9 +111,9 @@
<LENGTH>0</LENGTH>
<INODEID>16387</INODEID>
<PATH>/directory_mkdir</PATH>
<TIMESTAMP>1412114468041</TIMESTAMP>
<TIMESTAMP>1421131348366</TIMESTAMP>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>493</MODE>
</PERMISSION_STATUS>
@ -146,8 +146,8 @@
<TXID>13</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>22</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>15</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -157,8 +157,8 @@
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
<SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>23</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>16</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -167,8 +167,8 @@
<TXID>15</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>24</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>17</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -179,19 +179,19 @@
<INODEID>16388</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468073</MTIME>
<ATIME>1412114468073</ATIME>
<MTIME>1421131348401</MTIME>
<ATIME>1421131348401</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_1474796918_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>25</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>18</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -202,14 +202,14 @@
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468075</MTIME>
<ATIME>1412114468073</ATIME>
<MTIME>1421131348405</MTIME>
<ATIME>1421131348401</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<OVERWRITE>false</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -265,10 +265,10 @@
<LENGTH>0</LENGTH>
<SRC>/file_create</SRC>
<DST>/file_moved</DST>
<TIMESTAMP>1412114468093</TIMESTAMP>
<TIMESTAMP>1421131348436</TIMESTAMP>
<OPTIONS>NONE</OPTIONS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>32</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>25</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -279,19 +279,19 @@
<INODEID>16389</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468097</MTIME>
<ATIME>1412114468097</ATIME>
<MTIME>1421131348443</MTIME>
<ATIME>1421131348443</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_1474796918_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>34</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>27</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -396,8 +396,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468349</MTIME>
<ATIME>1412114468097</ATIME>
<MTIME>1421131348998</MTIME>
<ATIME>1421131348443</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@ -418,7 +418,7 @@
<GENSTAMP>1003</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -432,19 +432,19 @@
<INODEID>16390</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468351</MTIME>
<ATIME>1412114468351</ATIME>
<MTIME>1421131349001</MTIME>
<ATIME>1421131349001</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_1474796918_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>47</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>38</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -549,8 +549,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468370</MTIME>
<ATIME>1412114468351</ATIME>
<MTIME>1421131349032</MTIME>
<ATIME>1421131349001</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@ -571,7 +571,7 @@
<GENSTAMP>1006</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -585,19 +585,19 @@
<INODEID>16391</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468373</MTIME>
<ATIME>1412114468373</ATIME>
<MTIME>1421131349036</MTIME>
<ATIME>1421131349036</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_1474796918_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>59</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>47</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
@ -702,8 +702,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468392</MTIME>
<ATIME>1412114468373</ATIME>
<MTIME>1421131349060</MTIME>
<ATIME>1421131349036</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@ -724,7 +724,7 @@
<GENSTAMP>1009</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -736,76 +736,57 @@
<TXID>57</TXID>
<LENGTH>0</LENGTH>
<TRG>/file_concat_target</TRG>
<TIMESTAMP>1412114468395</TIMESTAMP>
<TIMESTAMP>1421131349064</TIMESTAMP>
<SOURCES>
<SOURCE1>/file_concat_0</SOURCE1>
<SOURCE2>/file_concat_1</SOURCE2>
</SOURCES>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>70</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SYMLINK</OPCODE>
<DATA>
<TXID>58</TXID>
<LENGTH>0</LENGTH>
<INODEID>16392</INODEID>
<PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE>
<MTIME>1412114468398</MTIME>
<ATIME>1412114468398</ATIME>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>71</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>55</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
<TXID>59</TXID>
<TXID>58</TXID>
<LENGTH>0</LENGTH>
<INODEID>16393</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<INODEID>16392</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114468401</MTIME>
<ATIME>1412114468401</ATIME>
<MTIME>1421131349068</MTIME>
<ATIME>1421131349068</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_1474796918_1</CLIENT_NAME>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>72</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>57</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
<DATA>
<TXID>60</TXID>
<TXID>59</TXID>
<BLOCK_ID>1073741834</BLOCK_ID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
<TXID>61</TXID>
<TXID>60</TXID>
<GENSTAMPV2>1010</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD_BLOCK</OPCODE>
<DATA>
<TXID>62</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<TXID>61</TXID>
<PATH>/file_create</PATH>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
@ -816,15 +797,160 @@
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
<DATA>
<TXID>62</TXID>
<BLOCK_ID>1073741835</BLOCK_ID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
<TXID>63</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<GENSTAMPV2>1011</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD_BLOCK</OPCODE>
<DATA>
<TXID>64</TXID>
<PATH>/file_create</PATH>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
<NUM_BYTES>11</NUM_BYTES>
<NUM_BYTES>512</NUM_BYTES>
<GENSTAMP>1010</GENSTAMP>
</BLOCK>
<BLOCK>
<BLOCK_ID>1073741835</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
<GENSTAMP>1011</GENSTAMP>
</BLOCK>
<RPC_CLIENTID></RPC_CLIENTID>
<RPC_CALLID>-2</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
<TXID>65</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/file_create</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1421131349085</MTIME>
<ATIME>1421131349068</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<OVERWRITE>false</OVERWRITE>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
<GENSTAMP>1010</GENSTAMP>
</BLOCK>
<BLOCK>
<BLOCK_ID>1073741835</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
<GENSTAMP>1011</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_TRUNCATE</OPCODE>
<DATA>
<TXID>66</TXID>
<SRC>/file_create</SRC>
<CLIENTNAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENTNAME>
<CLIENTMACHINE>127.0.0.1</CLIENTMACHINE>
<NEWLENGTH>512</NEWLENGTH>
<TIMESTAMP>1421131349088</TIMESTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SYMLINK</OPCODE>
<DATA>
<TXID>67</TXID>
<LENGTH>0</LENGTH>
<INODEID>16393</INODEID>
<PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE>
<MTIME>1421131349095</MTIME>
<ATIME>1421131349095</ATIME>
<PERMISSION_STATUS>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>64</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
<TXID>68</TXID>
<LENGTH>0</LENGTH>
<INODEID>16394</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1421131349098</MTIME>
<ATIME>1421131349098</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_526346936_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<OVERWRITE>true</OVERWRITE>
<PERMISSION_STATUS>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>65</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
<DATA>
<TXID>69</TXID>
<BLOCK_ID>1073741836</BLOCK_ID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
<TXID>70</TXID>
<GENSTAMPV2>1012</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD_BLOCK</OPCODE>
<DATA>
<TXID>71</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<BLOCK>
<BLOCK_ID>1073741836</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
<GENSTAMP>1012</GENSTAMP>
</BLOCK>
<RPC_CLIENTID></RPC_CLIENTID>
<RPC_CALLID>-2</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
<TXID>72</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<BLOCK>
<BLOCK_ID>1073741836</BLOCK_ID>
<NUM_BYTES>11</NUM_BYTES>
<GENSTAMP>1012</GENSTAMP>
</BLOCK>
<RPC_CLIENTID></RPC_CLIENTID>
<RPC_CALLID>-2</RPC_CALLID>
</DATA>
@ -832,15 +958,15 @@
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
<TXID>64</TXID>
<GENSTAMPV2>1011</GENSTAMPV2>
<TXID>73</TXID>
<GENSTAMPV2>1013</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA>
<TXID>65</TXID>
<LEASEHOLDER>DFSClient_NONMAPREDUCE_1474796918_1</LEASEHOLDER>
<TXID>74</TXID>
<LEASEHOLDER>DFSClient_NONMAPREDUCE_526346936_1</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA>
@ -848,24 +974,24 @@
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
<TXID>66</TXID>
<TXID>75</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
<MTIME>1412114470807</MTIME>
<ATIME>1412114468401</ATIME>
<MTIME>1421131351230</MTIME>
<ATIME>1421131349098</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<OVERWRITE>false</OVERWRITE>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
<BLOCK_ID>1073741836</BLOCK_ID>
<NUM_BYTES>11</NUM_BYTES>
<GENSTAMP>1011</GENSTAMP>
<GENSTAMP>1013</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
<USERNAME>aagarwal</USERNAME>
<USERNAME>plamenjeliazkov</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@ -874,72 +1000,72 @@
<RECORD>
<OPCODE>OP_ADD_CACHE_POOL</OPCODE>
<DATA>
<TXID>67</TXID>
<TXID>76</TXID>
<POOLNAME>pool1</POOLNAME>
<OWNERNAME>aagarwal</OWNERNAME>
<OWNERNAME>plamenjeliazkov</OWNERNAME>
<GROUPNAME>staff</GROUPNAME>
<MODE>493</MODE>
<LIMIT>9223372036854775807</LIMIT>
<MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>79</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>72</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_MODIFY_CACHE_POOL</OPCODE>
<DATA>
<TXID>68</TXID>
<TXID>77</TXID>
<POOLNAME>pool1</POOLNAME>
<LIMIT>99</LIMIT>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>80</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>73</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ADD_CACHE_DIRECTIVE</OPCODE>
<DATA>
<TXID>69</TXID>
<TXID>78</TXID>
<ID>1</ID>
<PATH>/path</PATH>
<REPLICATION>1</REPLICATION>
<POOL>pool1</POOL>
<EXPIRATION>2305844421328165416</EXPIRATION>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>81</RPC_CALLID>
<EXPIRATION>2305844430345046085</EXPIRATION>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>74</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_MODIFY_CACHE_DIRECTIVE</OPCODE>
<DATA>
<TXID>70</TXID>
<TXID>79</TXID>
<ID>1</ID>
<REPLICATION>2</REPLICATION>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>82</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>75</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REMOVE_CACHE_DIRECTIVE</OPCODE>
<DATA>
<TXID>71</TXID>
<TXID>80</TXID>
<ID>1</ID>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>83</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>76</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REMOVE_CACHE_POOL</OPCODE>
<DATA>
<TXID>72</TXID>
<TXID>81</TXID>
<POOLNAME>pool1</POOLNAME>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>84</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>77</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_ACL</OPCODE>
<DATA>
<TXID>73</TXID>
<TXID>82</TXID>
<SRC>/file_concat_target</SRC>
<ENTRY>
<SCOPE>ACCESS</SCOPE>
@ -972,62 +1098,62 @@
<RECORD>
<OPCODE>OP_SET_XATTR</OPCODE>
<DATA>
<TXID>74</TXID>
<TXID>83</TXID>
<SRC>/file_concat_target</SRC>
<XATTR>
<NAMESPACE>USER</NAMESPACE>
<NAME>a1</NAME>
<VALUE>0x313233</VALUE>
</XATTR>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>86</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>79</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_XATTR</OPCODE>
<DATA>
<TXID>75</TXID>
<TXID>84</TXID>
<SRC>/file_concat_target</SRC>
<XATTR>
<NAMESPACE>USER</NAMESPACE>
<NAME>a2</NAME>
<VALUE>0x373839</VALUE>
</XATTR>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>87</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>80</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REMOVE_XATTR</OPCODE>
<DATA>
<TXID>76</TXID>
<TXID>85</TXID>
<SRC>/file_concat_target</SRC>
<XATTR>
<NAMESPACE>USER</NAMESPACE>
<NAME>a2</NAME>
</XATTR>
<RPC_CLIENTID>0a28b871-f75a-46a4-80e0-fe41cbb6b034</RPC_CLIENTID>
<RPC_CALLID>88</RPC_CALLID>
<RPC_CLIENTID>99bcddc1-3460-4630-9904-6c7ca5811945</RPC_CLIENTID>
<RPC_CALLID>81</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
<DATA>
<TXID>77</TXID>
<STARTTIME>1412114471510</STARTTIME>
<TXID>86</TXID>
<STARTTIME>1421131352186</STARTTIME>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
<DATA>
<TXID>78</TXID>
<FINALIZETIME>1412114471510</FINALIZETIME>
<TXID>87</TXID>
<FINALIZETIME>1421131352186</FINALIZETIME>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_END_LOG_SEGMENT</OPCODE>
<DATA>
<TXID>79</TXID>
<TXID>88</TXID>
</DATA>
</RECORD>
</EDITS>