From 7e9358feb326d48b8c4f00249e7af5023cebd2e2 Mon Sep 17 00:00:00 2001 From: Plamen Jeliazkov Date: Mon, 12 Jan 2015 21:53:52 -0800 Subject: [PATCH] HDFS-3107. Introduce truncate. Contributed by Plamen Jeliazkov. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 15 + .../hadoop/hdfs/DistributedFileSystem.java | 15 +- .../hadoop/hdfs/protocol/ClientProtocol.java | 32 +- ...amenodeProtocolServerSideTranslatorPB.java | 14 + .../ClientNamenodeProtocolTranslatorPB.java | 16 + .../hadoop/hdfs/protocolPB/PBHelper.java | 6 +- .../BlockInfoUnderConstruction.java | 6 +- .../blockmanagement/DatanodeManager.java | 5 +- .../blockmanagement/DatanodeStorageInfo.java | 2 +- .../server/common/HdfsServerConstants.java | 7 + .../hadoop/hdfs/server/datanode/DataNode.java | 10 +- .../hdfs/server/namenode/FSDirectory.java | 66 ++- .../hdfs/server/namenode/FSEditLog.java | 15 + .../hdfs/server/namenode/FSEditLogLoader.java | 7 + .../hdfs/server/namenode/FSEditLogOp.java | 111 +++++ .../server/namenode/FSEditLogOpCodes.java | 1 + .../hdfs/server/namenode/FSNamesystem.java | 116 ++++- .../hdfs/server/namenode/INodeFile.java | 39 ++ .../server/namenode/NameNodeRpcServer.java | 17 + .../namenode/metrics/NameNodeMetrics.java | 5 + .../snapshot/FileWithSnapshotFeature.java | 37 +- .../server/protocol/BlockRecoveryCommand.java | 17 + .../main/proto/ClientNamenodeProtocol.proto | 11 + .../hadoop-hdfs/src/main/proto/hdfs.proto | 1 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 8 +- .../hdfs/TestDFSInotifyEventInputStream.java | 2 +- .../server/namenode/TestFileTruncate.java | 289 ++++++++++++ .../namenode/TestNamenodeRetryCache.java | 4 +- .../namenode/ha/TestRetryCacheWithHA.java | 4 +- .../src/test/resources/editsStored | Bin 5065 -> 5791 bytes .../src/test/resources/editsStored.xml | 426 ++++++++++++------ 32 files changed, 1102 insertions(+), 204 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index cefdc166247..6da066784ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -18,6 +18,8 @@ Trunk (Unreleased) HDFS-3125. Add JournalService to enable Journal Daemon. (suresh) + HDFS-3107. Introduce truncate. (Plamen Jeliazkov via shv) + IMPROVEMENTS HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 62db1fac8cc..f289da76886 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1916,6 +1916,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, SnapshotAccessControlException.class); } } + + /** + * Truncate a file to an indicated size + * See {@link ClientProtocol#truncate(String, long)}. + */ + public boolean truncate(String src, long newLength) throws IOException { + checkOpen(); + try { + return namenode.truncate(src, newLength, clientName); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + UnresolvedPathException.class); + } + } + /** * Delete file or directory. * See {@link ClientProtocol#delete(String, boolean)}. 
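The hunk above adds the client-side entry point, and the following hunk exposes it on DistributedFileSystem. As an illustration only (not part of the patch; the path, target length, and polling interval are made up), a caller is expected to wait for block recovery whenever the call returns false, mirroring the checkBlockRecovery loop in the new TestFileTruncate:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class TruncateUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster with this patch applied.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path file = new Path("/tmp/truncate-demo");   // hypothetical file
    long newLength = 1024;                        // hypothetical target size

    // true  -> the cut fell on a block boundary, namespace-only, done immediately;
    // false -> the last block is being recovered, wait before reading.
    boolean done = dfs.truncate(file, newLength);

    while (!done) {
      Thread.sleep(100);
      // getClient() is audience-private; used here only to mirror the test's wait loop.
      LocatedBlocks blocks = dfs.getClient()
          .getLocatedBlocks(file.toUri().getPath(), 0, Long.MAX_VALUE);
      done = !blocks.isUnderConstruction()
          && (blocks.getLastLocatedBlock() == null
              || blocks.isLastBlockComplete());
    }
    // The file now has exactly newLength bytes.
    dfs.close();
  }
}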
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index d4653acf66e..6284f61d4fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -626,7 +626,20 @@ public class DistributedFileSystem extends FileSystem { }.resolve(this, absDst); } } - + + /** + * Truncate the file in the indicated path to the indicated size. + * @param f The path to the file to be truncated + * @param newLength The size the file is to be truncated to + * + * @return true if client does not need to wait for block recovery, + * false if client needs to wait for block recovery. + */ + public boolean truncate(Path f, final long newLength) throws IOException { + statistics.incrementWriteOps(1); + return dfs.truncate(getPathName(f), newLength); + } + @Override public boolean delete(Path f, final boolean recursive) throws IOException { statistics.incrementWriteOps(1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 2301575f0e3..749f3876df3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -521,7 +521,37 @@ public interface ClientProtocol { FileAlreadyExistsException, FileNotFoundException, NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, UnresolvedLinkException, SnapshotAccessControlException, IOException; - + + /** + * Truncate file src to new size. + * + *

+ * This implementation of truncate is purely a namespace operation if the truncation + * occurs at a block boundary; otherwise it requires DataNode block recovery. + *

+ * @param src existing file + * @param newLength the target size + * + * @return true if client does not need to wait for block recovery, + * false if client needs to wait for block recovery. + * + * @throws AccessControlException If access is denied + * @throws FileNotFoundException If file src is not found + * @throws SafeModeException truncate not allowed in safemode + * @throws UnresolvedLinkException If src contains a symlink + * @throws SnapshotAccessControlException if path is in RO snapshot + * @throws IOException If an I/O error occurred + */ + @Idempotent + public boolean truncate(String src, long newLength, String clientName) + throws AccessControlException, FileNotFoundException, SafeModeException, + UnresolvedLinkException, SnapshotAccessControlException, IOException; + /** * Delete the given file or directory from the file system. *

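A small standalone sketch (not from the patch; the block size and lengths are invented, and it assumes all existing blocks are full) of the contract described in the javadoc above: a new length that falls on a block boundary is handled purely in the namespace, while anything else leaves a partial last block that must go through block recovery.

public class TruncateBoundaryDemo {
  // Mirrors the documented decision: on a boundary, truncate(...) can return
  // true right away; otherwise the last block shrinks via recovery and the
  // call returns false until that recovery completes.
  static void describe(long newLength, long blockSize) {
    long bytesInLastBlock = newLength % blockSize;
    if (bytesInLastBlock == 0) {
      System.out.println(newLength + ": block boundary -> namespace-only, returns true");
    } else {
      System.out.println(newLength + ": last block must shrink to "
          + bytesInLastBlock + " bytes -> block recovery, returns false");
    }
  }

  public static void main(String[] args) {
    long blockSize = 128L << 20;      // 128 MB, illustrative
    describe(256L << 20, blockSize);  // exactly two full blocks
    describe(200L << 20, blockSize);  // second block would become 72 MB
  }
}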
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 5b6609bfbbe..8bcc1eb77b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -181,6 +181,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSto import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; @@ -584,6 +586,18 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return VOID_RENAME2_RESPONSE; } + @Override + public TruncateResponseProto truncate(RpcController controller, + TruncateRequestProto req) throws ServiceException { + try { + boolean result = server.truncate(req.getSrc(), req.getNewLength(), + req.getClientName()); + return TruncateResponseProto.newBuilder().setResult(result).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public DeleteResponseProto delete(RpcController controller, DeleteRequestProto req) throws ServiceException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 58049204fc4..f3826af880d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -155,6 +155,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuo import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; @@ -301,6 +302,21 @@ public class ClientNamenodeProtocolTranslatorPB implements } + 
@Override + public boolean truncate(String src, long newLength, String clientName) + throws IOException, UnresolvedLinkException { + TruncateRequestProto req = TruncateRequestProto.newBuilder() + .setSrc(src) + .setNewLength(newLength) + .setClientName(clientName) + .build(); + try { + return rpcProxy.truncate(null, req).getResult(); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public LastBlockWithStatus append(String src, String clientName) throws AccessControlException, DSQuotaExceededException, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index ee6d58cbfae..3f6a7f3694c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -608,13 +608,15 @@ public class PBHelper { } LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b); return RecoveringBlockProto.newBuilder().setBlock(lb) - .setNewGenStamp(b.getNewGenerationStamp()).build(); + .setNewGenStamp(b.getNewGenerationStamp()) + .setTruncateFlag(b.getTruncateFlag()).build(); } public static RecoveringBlock convert(RecoveringBlockProto b) { ExtendedBlock block = convert(b.getBlock().getB()); DatanodeInfo[] locs = convert(b.getBlock().getLocsList()); - return new RecoveringBlock(block, locs, b.getNewGenStamp()); + return new RecoveringBlock(block, locs, b.getNewGenStamp(), + b.getTruncateFlag()); } public static DatanodeInfoProto.AdminState convert( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java index f19ad1c511b..28b179db1c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java @@ -273,7 +273,11 @@ public class BlockInfoUnderConstruction extends BlockInfo { * make it primary. 
*/ public void initializeBlockRecovery(long recoveryId) { - setBlockUCState(BlockUCState.UNDER_RECOVERY); + initializeBlockRecovery(BlockUCState.UNDER_RECOVERY, recoveryId); + } + + public void initializeBlockRecovery(BlockUCState s, long recoveryId) { + setBlockUCState(s); blockRecoveryId = recoveryId; if (replicas.size() == 0) { NameNode.blockStateChangeLog.warn("BLOCK*" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 41d03633fea..918b8d99a78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -1439,10 +1440,12 @@ public class DatanodeManager { LOG.info("Skipped stale nodes for recovery : " + (storages.length - recoveryLocations.size())); } + boolean isTruncate = b.getBlockUCState().equals( + HdfsServerConstants.BlockUCState.BEING_TRUNCATED); brCommand.add(new RecoveringBlock( new ExtendedBlock(blockPoolId, b), DatanodeStorageInfo.toDatanodeInfos(recoveryLocations), - b.getBlockRecoveryId())); + b.getBlockRecoveryId(), isTruncate)); } else { // If too many replicas are stale, then choose all replicas to participate // in block recovery. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index a3198e20ef3..3ab10b4488c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -240,7 +240,7 @@ public class DatanodeStorageInfo { return result; } - boolean removeBlock(BlockInfo b) { + public boolean removeBlock(BlockInfo b) { blockList = b.listRemove(blockList, this); if (b.removeStorage(this)) { numBlocks--; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 9bba2c950b8..f2e7ff44e46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -299,6 +299,13 @@ public final class HdfsServerConstants { * which synchronizes the existing replicas contents. */ UNDER_RECOVERY, + /** + * The block is being truncated.
+ * When a file is truncated its last block may need to be truncated + * and needs to go through a recovery procedure, + * which synchronizes the existing replicas contents. + */ + BEING_TRUNCATED, /** * The block is committed.
* The client reported that all bytes are written to data-nodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index d5d84290571..7f95f331500 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -2691,7 +2691,10 @@ public class DataNode extends ReconfigurableBase r.rInfo.getNumBytes() == finalizedLength) participatingList.add(r); } - newBlock.setNumBytes(finalizedLength); + if(rBlock.getTruncateFlag()) + newBlock.setNumBytes(rBlock.getBlock().getNumBytes()); + else + newBlock.setNumBytes(finalizedLength); break; case RBW: case RWR: @@ -2703,7 +2706,10 @@ public class DataNode extends ReconfigurableBase participatingList.add(r); } } - newBlock.setNumBytes(minLength); + if(rBlock.getTruncateFlag()) + newBlock.setNumBytes(rBlock.getBlock().getNumBytes()); + else + newBlock.setNumBytes(minLength); break; case RUR: case TEMPORARY: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index b39519f55ef..1948099fe9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1087,7 +1087,71 @@ public class FSDirectory implements Closeable { public INodeMap getINodeMap() { return inodeMap; } - + + /** + * FSEditLogLoader implementation. + * Unlike FSNamesystem.truncate, this will not schedule block recovery. + */ + void unprotectedTruncate(String src, String clientName, String clientMachine, + long newLength, long mtime) + throws UnresolvedLinkException, QuotaExceededException, + SnapshotAccessControlException, IOException { + INodesInPath iip = getINodesInPath(src, true); + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); + boolean onBlockBoundary = + unprotectedTruncate(iip, newLength, collectedBlocks, mtime); + + if(! onBlockBoundary) { + getFSNamesystem().prepareFileForWrite(src, + iip, clientName, clientMachine, false, false); + } + getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks); + } + + boolean truncate(INodesInPath iip, long newLength, + BlocksMapUpdateInfo collectedBlocks, + long mtime) + throws IOException { + writeLock(); + try { + return unprotectedTruncate(iip, newLength, collectedBlocks, mtime); + } finally { + writeUnlock(); + } + } + + /** + * Truncate has the following properties: + * 1.) Any block deletions occur now. + * 2.) INode length is truncated now – clients can only read up to new length. + * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY. + * 4.) NN will trigger DN truncation recovery and waits for DNs to report. + * 5.) File is considered UNDER_RECOVERY until truncation recovery completes. + * 6.) Soft and hard Lease expiration require truncation recovery to complete. 
+ * + * @return true if on the block boundary or false if recovery is need + */ + boolean unprotectedTruncate(INodesInPath iip, long newLength, + BlocksMapUpdateInfo collectedBlocks, + long mtime) throws IOException { + assert hasWriteLock(); + INodeFile file = iip.getLastINode().asFile(); + long oldDiskspace = file.diskspaceConsumed(); + long remainingLength = + file.collectBlocksBeyondMax(newLength, collectedBlocks); + file.setModificationTime(mtime); + updateCount(iip, 0, file.diskspaceConsumed() - oldDiskspace, true); + // If on block boundary, then return + long lastBlockDelta = remainingLength - newLength; + if(lastBlockDelta == 0) + return true; + // Set new last block length + BlockInfo lastBlock = file.getLastBlock(); + assert lastBlock.getNumBytes() - lastBlockDelta > 0 : "wrong block size"; + lastBlock.setNumBytes(lastBlock.getNumBytes() - lastBlockDelta); + return false; + } + /** * This method is always called with writeLock of FSDirectory held. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 4a29b59957d..d32aad933a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp; @@ -896,6 +897,20 @@ public class FSEditLog implements LogsPurgeable { logRpcIds(op, toLogRpcIds); logEdit(op); } + + /** + * Add truncate file record to edit log + */ + void logTruncate(String src, String clientName, String clientMachine, + long size, long timestamp) { + TruncateOp op = TruncateOp.getInstance(cache.get()) + .setPath(src) + .setClientName(clientName) + .setClientMachine(clientMachine) + .setNewLength(size) + .setTimestamp(timestamp); + logEdit(op); + } /** * Add legacy block generation stamp record to edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 011892646de..2ff3b772d2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade; import static org.apache.hadoop.util.Time.now; @@ -853,6 +854,12 @@ public class FSEditLogLoader { } break; } + case OP_TRUNCATE: { + TruncateOp truncateOp = (TruncateOp) op; + fsDir.unprotectedTruncate(truncateOp.src, truncateOp.clientName, + 
truncateOp.clientMachine, truncateOp.newLength, truncateOp.timestamp); + break; + } case OP_SET_STORAGE_POLICY: { SetStoragePolicyOp setStoragePolicyOp = (SetStoragePolicyOp) op; final String path = renameReservedPathsOnUpgrade(setStoragePolicyOp.path, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 11026fc80ad..396fb08de64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -59,6 +59,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XAT import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TRUNCATE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_UPDATE_BLOCKS; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_UPDATE_MASTER_KEY; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_STORAGE_POLICY; @@ -180,6 +181,7 @@ public abstract class FSEditLogOp { inst.put(OP_START_LOG_SEGMENT, new LogSegmentOp(OP_START_LOG_SEGMENT)); inst.put(OP_END_LOG_SEGMENT, new LogSegmentOp(OP_END_LOG_SEGMENT)); inst.put(OP_UPDATE_BLOCKS, new UpdateBlocksOp()); + inst.put(OP_TRUNCATE, new TruncateOp()); inst.put(OP_ALLOW_SNAPSHOT, new AllowSnapshotOp()); inst.put(OP_DISALLOW_SNAPSHOT, new DisallowSnapshotOp()); @@ -2602,6 +2604,115 @@ public abstract class FSEditLogOp { readRpcIdsFromXml(st); } } + + static class TruncateOp extends FSEditLogOp { + String src; + String clientName; + String clientMachine; + long newLength; + long timestamp; + + private TruncateOp() { + super(OP_TRUNCATE); + } + + static TruncateOp getInstance(OpInstanceCache cache) { + return (TruncateOp)cache.get(OP_TRUNCATE); + } + + @Override + void resetSubFields() { + src = null; + clientName = null; + clientMachine = null; + newLength = 0L; + timestamp = 0L; + } + + TruncateOp setPath(String src) { + this.src = src; + return this; + } + + TruncateOp setClientName(String clientName) { + this.clientName = clientName; + return this; + } + + TruncateOp setClientMachine(String clientMachine) { + this.clientMachine = clientMachine; + return this; + } + + TruncateOp setNewLength(long newLength) { + this.newLength = newLength; + return this; + } + + TruncateOp setTimestamp(long timestamp) { + this.timestamp = timestamp; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + src = FSImageSerialization.readString(in); + clientName = FSImageSerialization.readString(in); + clientMachine = FSImageSerialization.readString(in); + newLength = FSImageSerialization.readLong(in); + timestamp = FSImageSerialization.readLong(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeString(src, out); + FSImageSerialization.writeString(clientName, out); + FSImageSerialization.writeString(clientMachine, out); + FSImageSerialization.writeLong(newLength, out); + FSImageSerialization.writeLong(timestamp, out); + } + + @Override + 
protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "SRC", src); + XMLUtils.addSaxString(contentHandler, "CLIENTNAME", clientName); + XMLUtils.addSaxString(contentHandler, "CLIENTMACHINE", clientMachine); + XMLUtils.addSaxString(contentHandler, "NEWLENGTH", + Long.toString(newLength)); + XMLUtils.addSaxString(contentHandler, "TIMESTAMP", + Long.toString(timestamp)); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.src = st.getValue("SRC"); + this.clientName = st.getValue("CLIENTNAME"); + this.clientMachine = st.getValue("CLIENTMACHINE"); + this.newLength = Long.parseLong(st.getValue("NEWLENGTH")); + this.timestamp = Long.parseLong(st.getValue("TIMESTAMP")); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("TruncateOp [src="); + builder.append(src); + builder.append(", clientName="); + builder.append(clientName); + builder.append(", clientMachine="); + builder.append(clientMachine); + builder.append(", newLength="); + builder.append(newLength); + builder.append(", timestamp="); + builder.append(timestamp); + builder.append(", opCode="); + builder.append(opCode); + builder.append(", txid="); + builder.append(txid); + builder.append("]"); + return builder.toString(); + } + } /** * {@literal @Idempotent} for {@link ClientProtocol#recoverLease}. In the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index 86be54adb7f..468e0482f31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -73,6 +73,7 @@ public enum FSEditLogOpCodes { OP_SET_XATTR ((byte) 43), OP_REMOVE_XATTR ((byte) 44), OP_SET_STORAGE_POLICY ((byte) 45), + OP_TRUNCATE ((byte) 46), // Note that the current range of the valid OP code is 0~127 OP_INVALID ((byte) -1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 36a43347566..c250838e48a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1906,6 +1906,114 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, logAuditEvent(true, "setTimes", src, null, auditStat); } + /** + * Truncate file to a lower length. + * Truncate cannot be reverted / recovered from as it causes data loss. + * Truncation at block boundary is atomic, otherwise it requires + * block recovery to truncate the last block of the file. + * + * @return true if and client does not need to wait for block recovery, + * false if client needs to wait for block recovery. 
+ */ + boolean truncate(String src, long newLength, + String clientName, String clientMachine, + long mtime) + throws IOException, UnresolvedLinkException { + boolean ret; + try { + ret = truncateInt(src, newLength, clientName, clientMachine, mtime); + } catch (AccessControlException e) { + logAuditEvent(false, "truncate", src); + throw e; + } + return ret; + } + + boolean truncateInt(String srcArg, long newLength, + String clientName, String clientMachine, + long mtime) + throws IOException, UnresolvedLinkException { + String src = srcArg; + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src=" + + src + " newLength=" + newLength); + } + HdfsFileStatus stat = null; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + boolean res; + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot truncate for " + src); + src = dir.resolvePath(pc, src, pathComponents); + res = truncateInternal(src, newLength, clientName, + clientMachine, mtime, pc); + stat = FSDirStatAndListingOp.getFileInfo(dir, src, false, + FSDirectory.isReservedRawName(src), true); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "truncate", src, null, stat); + return res; + } + + /** + * Truncate a file to a given size + * Update the count at each ancestor directory with quota + */ + boolean truncateInternal(String src, long newLength, + String clientName, String clientMachine, + long mtime, FSPermissionChecker pc) + throws IOException, UnresolvedLinkException { + assert hasWriteLock(); + INodesInPath iip = dir.getINodesInPath4Write(src, true); + if (isPermissionEnabled) { + dir.checkPathAccess(pc, iip, FsAction.WRITE); + } + INodeFile file = iip.getLastINode().asFile(); + // Data will be lost after truncate occurs so it cannot support snapshots. + if(file.isInLatestSnapshot(iip.getLatestSnapshotId())) + throw new HadoopIllegalArgumentException( + "Cannot truncate file with snapshot."); + // Opening an existing file for write. May need lease recovery. + recoverLeaseInternal(iip, src, clientName, clientMachine, false); + // Refresh INode as the file could have been closed + iip = dir.getINodesInPath4Write(src, true); + file = INodeFile.valueOf(iip.getLastINode(), src); + // Truncate length check. + long oldLength = file.computeFileSize(); + if(oldLength == newLength) + return true; + if(oldLength < newLength) + throw new HadoopIllegalArgumentException( + "Cannot truncate to a larger file size. Current size: " + oldLength + + ", truncate size: " + newLength + "."); + // Perform INodeFile truncation. + BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); + boolean onBlockBoundary = dir.truncate(iip, newLength, + collectedBlocks, mtime); + + if(! 
onBlockBoundary) { + // Open file for write, but don't log into edits + prepareFileForWrite(src, iip, clientName, clientMachine, false, false); + file = INodeFile.valueOf(dir.getINode4Write(src), src); + initializeBlockRecovery(file); + } + getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime); + removeBlocks(collectedBlocks); + return onBlockBoundary; + } + + void initializeBlockRecovery(INodeFile inodeFile) throws IOException { + BlockInfo lastBlock = inodeFile.getLastBlock(); + long recoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(lastBlock)); + ((BlockInfoUnderConstruction)lastBlock).initializeBlockRecovery( + BlockUCState.BEING_TRUNCATED, recoveryId); + } + /** * Create a symbolic link. */ @@ -2615,7 +2723,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } else { final BlockInfo lastBlock = file.getLastBlock(); if (lastBlock != null - && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { + && (lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY || + lastBlock.getBlockUCState() == BlockUCState.BEING_TRUNCATED)) { throw new RecoveryInProgressException("Recovery in progress, file [" + src + "], " + "lease owner [" + lease.getHolder() + "]"); } else { @@ -3833,6 +3942,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, throw new AlreadyBeingCreatedException(message); case UNDER_CONSTRUCTION: case UNDER_RECOVERY: + case BEING_TRUNCATED: final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)lastBlock; // setup the last block locations from the blockManager if not known if (uc.getNumExpectedLocations() == 0) { @@ -3854,7 +3964,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, // start recovery of the last block for this file long blockRecoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(uc)); lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile); - uc.initializeBlockRecovery(blockRecoveryId); + if (uc.getBlockUCState() != BlockUCState.BEING_TRUNCATED) { + uc.initializeBlockRecovery(blockRecoveryId); + } leaseManager.renewLease(lease); // Cannot close file right now, since the last block requires recovery. // This may potentially cause infinite loop in lease recovery diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index b811f122d1f..d1ff2f78979 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -696,4 +696,43 @@ public class INodeFile extends INodeWithAdditionalFields out.print(blocks == null || blocks.length == 0? null: blocks[0]); out.println(); } + + /** + * Remove full blocks at the end file up to newLength + * @return sum of sizes of the remained blocks + */ + public long collectBlocksBeyondMax(final long max, + final BlocksMapUpdateInfo collectedBlocks) { + final BlockInfo[] oldBlocks = getBlocks(); + if (oldBlocks == null) + return 0; + //find the minimum n such that the size of the first n blocks > max + int n = 0; + long size = 0; + for(; n < oldBlocks.length && max > size; n++) { + size += oldBlocks[n].getNumBytes(); + } + if (n >= oldBlocks.length) + return size; + + // starting from block n, the data is beyond max. + // resize the array. 
+ final BlockInfo[] newBlocks; + if (n == 0) { + newBlocks = BlockInfo.EMPTY_ARRAY; + } else { + newBlocks = new BlockInfo[n]; + System.arraycopy(oldBlocks, 0, newBlocks, 0, n); + } + // set new blocks + setBlocks(newBlocks); + + // collect the blocks beyond max + if (collectedBlocks != null) { + for(; n < oldBlocks.length; n++) { + collectedBlocks.addDeleteBlock(oldBlocks[n]); + } + } + return size; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 824094675da..6ef8fd6b175 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH; +import static org.apache.hadoop.util.Time.now; import java.io.FileNotFoundException; import java.io.IOException; @@ -882,6 +883,22 @@ class NameNodeRpcServer implements NamenodeProtocols { metrics.incrFilesRenamed(); } + @Override // ClientProtocol + public boolean truncate(String src, long newLength, String clientName) + throws IOException { + if(stateChangeLog.isDebugEnabled()) { + stateChangeLog.debug("*DIR* NameNode.truncate: " + src + " to " + + newLength); + } + String clientMachine = getClientMachine(); + try { + return namesystem.truncate( + src, newLength, clientName, clientMachine, now()); + } finally { + metrics.incrFilesTruncated(); + } + } + @Override // ClientProtocol public boolean delete(String src, boolean recursive) throws IOException { if (stateChangeLog.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index 42942dc51bc..94e845ba3c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -47,6 +47,7 @@ public class NameNodeMetrics { @Metric MutableCounterLong filesAppended; @Metric MutableCounterLong getBlockLocations; @Metric MutableCounterLong filesRenamed; + @Metric MutableCounterLong filesTruncated; @Metric MutableCounterLong getListingOps; @Metric MutableCounterLong deleteFileOps; @Metric("Number of files/dirs deleted by delete or rename operations") @@ -173,6 +174,10 @@ public class NameNodeMetrics { filesRenamed.incr(); } + public void incrFilesTruncated() { + filesTruncated.incr(); + } + public void incrFilesDeleted(long delta) { filesDeleted.incr(delta); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java index e3bf349609c..16f534f0a9b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java @@ -21,7 +21,6 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.AclFeature; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; @@ -182,40 +181,6 @@ public class FileWithSnapshotFeature implements INode.Feature { max = file.computeFileSize(); } - collectBlocksBeyondMax(file, max, info); - } - - private void collectBlocksBeyondMax(final INodeFile file, final long max, - final BlocksMapUpdateInfo collectedBlocks) { - final BlockInfo[] oldBlocks = file.getBlocks(); - if (oldBlocks != null) { - //find the minimum n such that the size of the first n blocks > max - int n = 0; - for(long size = 0; n < oldBlocks.length && max > size; n++) { - size += oldBlocks[n].getNumBytes(); - } - - // starting from block n, the data is beyond max. - if (n < oldBlocks.length) { - // resize the array. - final BlockInfo[] newBlocks; - if (n == 0) { - newBlocks = BlockInfo.EMPTY_ARRAY; - } else { - newBlocks = new BlockInfo[n]; - System.arraycopy(oldBlocks, 0, newBlocks, 0, n); - } - - // set new blocks - file.setBlocks(newBlocks); - - // collect the blocks beyond max. - if (collectedBlocks != null) { - for(; n < oldBlocks.length; n++) { - collectedBlocks.addDeleteBlock(oldBlocks[n]); - } - } - } - } + file.collectBlocksBeyondMax(max, info); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java index b7199ba3803..c51203894c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java @@ -53,6 +53,7 @@ public class BlockRecoveryCommand extends DatanodeCommand { @InterfaceAudience.Private @InterfaceStability.Evolving public static class RecoveringBlock extends LocatedBlock { + private boolean truncate; private final long newGenerationStamp; /** @@ -63,6 +64,15 @@ public class BlockRecoveryCommand extends DatanodeCommand { this.newGenerationStamp = newGS; } + /** + * RecoveryingBlock with truncate option. + */ + public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS, + boolean truncate) { + this(b, locs, newGS); + this.truncate = truncate; + } + /** * Return the new generation stamp of the block, * which also plays role of the recovery id. @@ -70,6 +80,13 @@ public class BlockRecoveryCommand extends DatanodeCommand { public long getNewGenerationStamp() { return newGenerationStamp; } + + /** + * Return whether to truncate the block to the ExtendedBlock's length. 
+ */ + public boolean getTruncateFlag() { + return truncate; + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 2c1d3cb9f3e..5c9f7528a79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -198,6 +198,16 @@ message ConcatRequestProto { message ConcatResponseProto { // void response } +message TruncateRequestProto { + required string src = 1; + required uint64 newLength = 2; + required string clientName = 3; +} + +message TruncateResponseProto { + required bool result = 1; +} + message RenameRequestProto { required string src = 1; required string dst = 2; @@ -722,6 +732,7 @@ service ClientNamenodeProtocol { rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto); rpc concat(ConcatRequestProto) returns(ConcatResponseProto); + rpc truncate(TruncateRequestProto) returns(TruncateResponseProto); rpc rename(RenameRequestProto) returns(RenameResponseProto); rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto); rpc delete(DeleteRequestProto) returns(DeleteResponseProto); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 04a8f3f0fb0..d989c0a8eb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -556,6 +556,7 @@ enum ReplicaStateProto { message RecoveringBlockProto { required uint64 newGenStamp = 1; // New genstamp post recovery required LocatedBlockProto block = 2; // Block to be recovered + optional bool truncateFlag = 3; // Block needs to be truncated } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 01f5d2e1932..15f5f2eafcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1194,7 +1194,13 @@ public class DFSTestUtil { DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication, seed); filesystem.concat(pathConcatTarget, pathConcatFiles); - + + // OP_TRUNCATE 46 + length = blockSize * 2; + DFSTestUtil.createFile(filesystem, pathFileCreate, length, replication, + seed); + filesystem.truncate(pathFileCreate, blockSize); + // OP_SYMLINK 17 Path pathSymlink = new Path("/file_symlink"); fc.createSymlink(pathConcatTarget, pathSymlink, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java index 77a17eddeaa..75a4ad4311e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java @@ -71,7 +71,7 @@ public class TestDFSInotifyEventInputStream { */ @Test public void testOpcodeCount() { - Assert.assertEquals(47, FSEditLogOpCodes.values().length); + Assert.assertEquals(48, FSEditLogOpCodes.values().length); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java new file mode 100644 index 00000000000..ba9d04e914e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.net.InetAddress; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.AppendTestUtil; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; +import org.apache.hadoop.hdfs.server.common.GenerationStamp; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.log4j.Level; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestFileTruncate { + static { + GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL); + GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL); + } + static final int BLOCK_SIZE = 4; + static final short REPLICATION = 3; + static final int DATANODE_NUM = 3; + static final int SUCCESS_ATTEMPTS = 300; + static final int RECOVERY_ATTEMPTS = 600; + static final long SLEEP = 100L; + + static final long LOW_SOFTLIMIT = 100L; + static final long LOW_HARDLIMIT = 200L; + static final int SHORT_HEARTBEAT = 1; + + static Configuration conf; + static MiniDFSCluster cluster; + static DistributedFileSystem fs; + + @BeforeClass + public static void startUp() throws IOException { + conf = new HdfsConfiguration(); + 
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE); + conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT); + cluster = new MiniDFSCluster.Builder(conf) + .format(true) + .numDataNodes(DATANODE_NUM) + .nameNodePort(NameNode.DEFAULT_PORT) + .waitSafeMode(true) + .build(); + fs = cluster.getFileSystem(); + } + + @AfterClass + public static void tearDown() throws IOException { + if(fs != null) fs.close(); + if(cluster != null) cluster.shutdown(); + } + + /** + * Truncate files of different sizes byte by byte. + */ + @Test + public void testBasicTruncate() throws IOException { + int startingFileSize = 3 * BLOCK_SIZE; + + Path parent = new Path("/test"); + fs.mkdirs(parent); + fs.setQuota(parent, 100, 1000); + byte[] contents = AppendTestUtil.initBuffer(startingFileSize); + for (int fileLength = startingFileSize; fileLength > 0; + fileLength -= BLOCK_SIZE - 1) { + for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) { + final Path p = new Path(parent, "testBasicTruncate" + fileLength); + writeContents(contents, fileLength, p); + + int newLength = fileLength - toTruncate; + boolean isReady = fs.truncate(p, newLength); + + if(!isReady) + checkBlockRecovery(p); + + FileStatus fileStatus = fs.getFileStatus(p); + assertThat(fileStatus.getLen(), is((long) newLength)); + + ContentSummary cs = fs.getContentSummary(parent); + assertEquals("Bad disk space usage", + cs.getSpaceConsumed(), newLength * REPLICATION); + // validate the file content + AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString()); + } + } + fs.delete(parent, true); + } + + /** + * Failure / recovery test for truncate. + * In this failure the DNs fail to recover the blocks and the NN triggers + * lease recovery. + * File stays in RecoveryInProgress until DataNodes report recovery. 
+ */ + @Test + public void testTruncateFailure() throws IOException { + int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2; + int toTruncate = 1; + + byte[] contents = AppendTestUtil.initBuffer(startingFileSize); + final Path p = new Path("/testTruncateFailure"); + FSDataOutputStream out = fs.create(p, false, BLOCK_SIZE, REPLICATION, + BLOCK_SIZE); + out.write(contents, 0, startingFileSize); + try { + fs.truncate(p, 0); + fail("Truncate must fail on open file."); + } catch(IOException expected) {} + out.close(); + + cluster.shutdownDataNodes(); + NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) + .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT); + + int newLength = startingFileSize - toTruncate; + boolean isReady = fs.truncate(p, newLength); + assertThat("truncate should have triggered block recovery.", + isReady, is(false)); + FileStatus fileStatus = fs.getFileStatus(p); + assertThat(fileStatus.getLen(), is((long) newLength)); + + boolean recoveryTriggered = false; + for(int i = 0; i < RECOVERY_ATTEMPTS; i++) { + String leaseHolder = + NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), + p.toUri().getPath()); + if(leaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) { + cluster.startDataNodes(conf, DATANODE_NUM, true, + HdfsServerConstants.StartupOption.REGULAR, null); + recoveryTriggered = true; + break; + } + try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {} + } + assertThat("lease recovery should have occurred in ~" + + SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true)); + + checkBlockRecovery(p); + + NameNodeAdapter.getLeaseManager(cluster.getNamesystem()) + .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, + HdfsConstants.LEASE_HARDLIMIT_PERIOD); + + fileStatus = fs.getFileStatus(p); + assertThat(fileStatus.getLen(), is((long) newLength)); + + AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString()); + fs.delete(p, false); + } + + /** + * EditLogOp load test for Truncate. + */ + @Test + public void testTruncateEditLogLoad() throws IOException { + int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2; + int toTruncate = 1; + + byte[] contents = AppendTestUtil.initBuffer(startingFileSize); + + final Path p = new Path("/testTruncateEditLogLoad"); + writeContents(contents, startingFileSize, p); + + int newLength = startingFileSize - toTruncate; + boolean isReady = fs.truncate(p, newLength); + assertThat("truncate should have triggered block recovery.", + isReady, is(false)); + + checkBlockRecovery(p); + + cluster.restartNameNode(); + + FileStatus fileStatus = fs.getFileStatus(p); + assertThat(fileStatus.getLen(), is((long) newLength)); + + AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString()); + fs.delete(p, false); + } + + /** + * Check truncate recovery. 
+ */ + @Test + public void testTruncateLastBlock() throws IOException { + FSNamesystem fsn = cluster.getNamesystem(); + + String src = "/file"; + Path srcPath = new Path(src); + + byte[] contents = AppendTestUtil.initBuffer(BLOCK_SIZE); + writeContents(contents, BLOCK_SIZE, srcPath); + + INodeFile inode = fsn.getFSDirectory().getINode(src).asFile(); + long oldGenstamp = GenerationStamp.LAST_RESERVED_STAMP; + DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor(); + DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo( + dn.getDatanodeUuid(), InetAddress.getLocalHost().getHostAddress()); + dn.isAlive = true; + + BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction( + new Block(0, 1, oldGenstamp), (short) 1, + HdfsServerConstants.BlockUCState.BEING_TRUNCATED, + new DatanodeStorageInfo[] {storage}); + + inode.setBlocks(new BlockInfo[] {blockInfo}); + fsn.writeLock(); + try { + fsn.initializeBlockRecovery(inode); + assertThat(inode.getLastBlock().getBlockUCState(), + is(HdfsServerConstants.BlockUCState.BEING_TRUNCATED)); + long blockRecoveryId = ((BlockInfoUnderConstruction) inode.getLastBlock()) + .getBlockRecoveryId(); + assertThat(blockRecoveryId, is(oldGenstamp + 2)); + } finally { + fsn.writeUnlock(); + } + } + + static void writeContents(byte[] contents, int fileLength, Path p) + throws IOException { + FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION, + BLOCK_SIZE); + out.write(contents, 0, fileLength); + out.close(); + } + + static void checkBlockRecovery(Path p) throws IOException { + boolean success = false; + for(int i = 0; i < SUCCESS_ATTEMPTS; i++) { + LocatedBlocks blocks = getLocatedBlocks(p); + boolean noLastBlock = blocks.getLastLocatedBlock() == null; + if(!blocks.isUnderConstruction() && + (noLastBlock || blocks.isLastBlockComplete())) { + success = true; + break; + } + try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {} + } + assertThat("inode should complete in ~" + SLEEP * SUCCESS_ATTEMPTS + " ms.", + success, is(true)); + } + + static LocatedBlocks getLocatedBlocks(Path src) throws IOException { + return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index b9e62e3f6e9..3084f2684fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -409,7 +409,7 @@ public class TestNamenodeRetryCache { LightWeightCache cacheSet = (LightWeightCache) namesystem.getRetryCache().getCacheSet(); - assertEquals(23, cacheSet.size()); + assertEquals(24, cacheSet.size()); Map oldEntries = new HashMap(); @@ -428,7 +428,7 @@ public class TestNamenodeRetryCache { assertTrue(namesystem.hasRetryCache()); cacheSet = (LightWeightCache) namesystem .getRetryCache().getCacheSet(); - assertEquals(23, cacheSet.size()); + assertEquals(24, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java 
index 3739bd9f52f..066fd668cfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -163,7 +163,7 @@ public class TestRetryCacheWithHA { FSNamesystem fsn0 = cluster.getNamesystem(0); LightWeightCache cacheSet = (LightWeightCache) fsn0.getRetryCache().getCacheSet(); - assertEquals(23, cacheSet.size()); + assertEquals(24, cacheSet.size()); Map oldEntries = new HashMap(); @@ -184,7 +184,7 @@ public class TestRetryCacheWithHA { FSNamesystem fsn1 = cluster.getNamesystem(1); cacheSet = (LightWeightCache) fsn1 .getRetryCache().getCacheSet(); - assertEquals(23, cacheSet.size()); + assertEquals(24, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 08607ebb520b4f22beb52980d20598e0d5b6e1da..002c506da4aa48d62a97d2527f3e2d5fec3977d9 100644 GIT binary patch literal 5791 zcmcgw4R8}h6yD_8G;LBEN)ZYLVrdH~n5Hd+%HLrrLQ7jfK&oQNwAVkktD8rBZjxwmzj(`f)`hz00#^5a zy|?e%y}fjFbQ~ZcJ>hG`3lKwH&ZT#{!$lu@ONi01!jNydf7*xU6z7J8{Tj^!n;!L~ zs4WB|vp8T*&5w^VCOEJ7$EB{r;X7p?5lo9lVs(^Pd8Cq3pX3fm!~l4CC!^{|hcsFm zsd-$T$6X`UR!g2L_kuazc`^REb&{{b=bc+;*v=d#j~o9`j;Bhh4U`lX6&8%0JT-UR z^qkz1p&7$6vxa474l8kzXlI7&zI6CF0nN6q<>!yGW{*!_7vX8#u{bdo{Ft8Gy6v}U zxR}M9l1N#@q;R9HyzOX{UO1XE1ibc^J)6QkN1aR^TL#e)3i1j{Sad2ja|$g8)JNTiA#WSGv%>3Cm^CgY>u)jpHv#~`szz+=D@Wa z#A-$B)#^O;QhzP56)Dx1-VqYh+3DhwQ=70`F|l+u$sKrhz1oYlvq{qE;%&Xa%NQ9b zwzCO|_Sd@W{FUB-lRFb}U!>Z8NrLftvX3tMK7#HtxbAF^ZE1C2>6MV;^xW}%bWJS% zs0&7;EGZicO^nb`MOjkKa%n)L{J%>oe%&Y6V$@5jD~ket`AcfgZAW|P@DH1CTfs7+ z#A`iw&0lGWf)2%?TeyLtqweOXi_BnRi!F~9cTn5l6|1W8JCoeKxwgYSeVuK{& zj|d?>e(K}d{oq>Zz~v1hoH_H}0L$HQl|}Dyf90izJ1g}=Rtskcff@h1b$9;-c4$>R z(#aAb<5dku)~KQ6r=ZZ5aH#i_2=xGbQTlSB=!}UF@@gXkqrk>xh7JeFI!6tBw=S@e0=MB%5k`lA8(tXxF0RNounQ!aDfs10 znZk%h6NeG_j6m;nUKN&LIplUDYE^qM8cq9Iy{E?ZJ)2(q90%-Tqey?IMHA@~M&a~e ztdJg_CK@qK5Eh0m1~a@Pr--!XSE|3TgF~gVDd=1zB%-~&Yqh$vsn`M2b*}Q!dD>HrRN7^2QwuJ;XgXjL%jE-1pm>BmvK4y!Tm1tSpQLgCBh*=o$ex5A91 znDm4og2NDI%hA@&SjX4UAqbwHm}urGSba#iQ}q?5;)4Q&F#@L^??jk2)T;JiAe#0I zx-4ql@*}03!c=+N7o#*;JA<(fmSY8t!s61!V1{>usSd~V4^G>`p;AxEKM_uyZEv24 zRo63}p$Zp!M6M9jOm%8AQ{j%wgC8nC+Wv2iw$`u3KT6 zDdr^{B@z-H!aOmpB~8uL;b=l7oT;JekK6wL5<8$MmtS;sKZvYRcBN6_c|lst0j!40 zY&AshIDWzotqKOb@lq4Z%*_W9)EHy112BF+{(j;{#GuiD7Q-><9ElJl(VJVO<(^!@326OvP2*yKT-BmtF@#R{w83R?{` z`ak~+p6-?56aLF^OK`Zg?T;zyuqGk`(DLfu3e%r=!PO)59^a<^%LM7vCw2n-!&FiT z|3eje%cLG_R(o(sEs@>l%Nkx){ze*xNKdDr=>rI~fonc#f7%l+eDfa!Uz90IqCDpX32%Mej3;Wv>C~JU-fL65$xLjd zrzzgbbaKR8zvP2f1GE}J8)>h}EPy6SjYrqxk(n+x&<`c^om$K!-spDr5}X;C++>P| z7wliF_B2h`Q${brH9VW^X>#|4Yf|u3D#90KdLMpFkSN5&|F3Gw=Jvh`IIqfnGJ{cn l`GU6M>Wf$(r2`m@`|k1u%SQfH)B`%K@I{%KyY%bt{{gvMEPwz2 literal 5065 zcmcgwYfMx}6rSD7V_`u7-y*VN>ldxwzf#lLV|c{KD`COjnh4_uC$D6GI;f(d#zoa@$Vth&3Gh;e140^4{fplCnP1 zMksm~5oZWW3R<`R;+&~@o440t^LmaEz`Uv_VF@K=<+j2iht2A;5gqX5p84R%wO)^r zdKzx9S{GX#%dO?1&gv?gW3j_tU8QSaNCPM2%*-k;vsJnZ^9%CxGN;YRp73f`cA+^X zH6_(D%3@9{G?Nf>QtHS=_%j1fMCg!%OCQb4X;}07)68=R_N4$&_>=`>R)oOKY=%k# zxrgD9=DGjb^Sn^~mOdWfqB&%cFJxHtqYrOES|pp-2m96&BTJydzRXt4Sr1D)5%Huk zvj#^jl_MEsZI4UaM%H&-oUTB->AWot2AHalh=k%YhpotEcdRU|SOWJviL=5>r>B)R zZ#g)66UwJQeIg{ZJ=)sqGxlM%2r;$~$sTfZn-(RkeMrA?uhsVjlnhQ3-adqcI4iAH z&QiO}EFFTLN=wbze$ak0$LP{Ih6gT53fHUL+B^^ET%^>sGdMX$@22QOt1ug}d?HwC 
zYJi_c#PVt8a{?aiMJ=B`Y1NGmNKk)CEFE^6;LLDW+A*Uv6%t-TuAW70QIjdigO(R{IkO?_l3D)xCaV7BcL{ z3m9(TU}_-g#`i6IWs%iY=(0K%+gxIs{fX-ZJ=%-XNu1>vNihLocVq?YgBKOLxA4aC zMX_)%DsU}>Db3A|gN*~=4tHs^Xlci2KU5s1O$ulq#`@{euLr(qf(8PS$^%05wKW$v zYpM9_O=;9mD;20V=@}XAfc`z58&PIj zIMXnMny)y$^A4-A-MkBN;)w)|X#o=SbkeWflZan=n_NF9---Vl4}nE2oLJ<0I}-b5 zo#^*$=D81-9$s@MJ*j<&hd3AK8O^+EAmMC>cCM@_FRNVA0X-cSSUAi}yW*A-Py8b^ z^CEKXzQSBrAx<4VSL(mk;zLt#zcV{3A6t8*cq zoka7DXK8NqG~Zda$mg{JsiGuVwagEh*{PEnwN$`JX_tF-wC++Nd$A%#pnN#k|XZ?C%K_kAxr*Yma5?E!omm^{< z@iZZUPM5W$MA!N@oafMD>D4WlY{8!;t6K2&Pd6#>6TG1K*@0Fh75lX8(C}~KpgM;J zdH)4~z3NH=6RNDPQh3X-98GjJT^=HZnm#oA%mF+$ByuPLiBNj@ll|9Y#;-poULObn z^xY4BB5$mdhRwJRCVZ9{ojuIQ)?eAe!5rC;EAMU3^NtWeoGG%CnIB1rF`s4(W^ z0;-)h2bAidR1aQHYl935c(U!Y1z+KTB%7x&IZh60G>|C0)!bJxCnZZeGyB28?5A3! zDV-uE^;J^SEK;O7ImJr@MnFC)&uMCmJSXTV2 1 - 1412805665311 - c1cad1109e33ae77 + 1421822547136 + 24319c7d1f7c0828 @@ -24,8 +24,8 @@ 3 2 - 1412805665314 - 0632068587d6574c + 1421822547140 + 254b1207021431f4 @@ -37,19 +37,19 @@ 16386 /file_create 1 - 1412114467969 - 1412114467969 + 1421131348286 + 1421131348286 512 - DFSClient_NONMAPREDUCE_1474796918_1 + DFSClient_NONMAPREDUCE_526346936_1 127.0.0.1 true - aagarwal + plamenjeliazkov supergroup 420 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 13 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 6 @@ -60,14 +60,14 @@ 0 /file_create 1 - 1412114468019 - 1412114467969 + 1421131348328 + 1421131348286 512 false - aagarwal + plamenjeliazkov supergroup 420 @@ -78,7 +78,7 @@ 6 /file_create - 12 + 7 @@ -88,9 +88,9 @@ 0 /file_create /file_moved - 1412114468027 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 16 + 1421131348343 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 9 @@ -99,9 +99,9 @@ 8 0 /file_moved - 1412114468034 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 17 + 1421131348353 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 10 @@ -111,9 +111,9 @@ 0 16387 /directory_mkdir - 1412114468041 + 1421131348366 - aagarwal + plamenjeliazkov supergroup 493 @@ -146,8 +146,8 @@ 13 /directory_mkdir snapshot1 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 22 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 15 @@ -157,8 +157,8 @@ /directory_mkdir snapshot1 snapshot2 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 23 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 16 @@ -167,8 +167,8 @@ 15 /directory_mkdir snapshot2 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 24 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 17 @@ -179,19 +179,19 @@ 16388 /file_create 1 - 1412114468073 - 1412114468073 + 1421131348401 + 1421131348401 512 - DFSClient_NONMAPREDUCE_1474796918_1 + DFSClient_NONMAPREDUCE_526346936_1 127.0.0.1 true - aagarwal + plamenjeliazkov supergroup 420 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 25 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 18 @@ -202,14 +202,14 @@ 0 /file_create 1 - 1412114468075 - 1412114468073 + 1421131348405 + 1421131348401 512 false - aagarwal + plamenjeliazkov supergroup 420 @@ -265,10 +265,10 @@ 0 /file_create /file_moved - 1412114468093 + 1421131348436 NONE - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 32 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 25 @@ -279,19 +279,19 @@ 16389 /file_concat_target 1 - 1412114468097 - 1412114468097 + 1421131348443 + 1421131348443 512 - DFSClient_NONMAPREDUCE_1474796918_1 + DFSClient_NONMAPREDUCE_526346936_1 127.0.0.1 true - aagarwal + plamenjeliazkov supergroup 420 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 34 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 27 @@ -396,8 +396,8 @@ 0 /file_concat_target 1 - 1412114468349 - 1412114468097 + 1421131348998 + 1421131348443 512 @@ -418,7 +418,7 @@ 1003 - aagarwal + plamenjeliazkov supergroup 420 @@ 
-432,19 +432,19 @@ 16390 /file_concat_0 1 - 1412114468351 - 1412114468351 + 1421131349001 + 1421131349001 512 - DFSClient_NONMAPREDUCE_1474796918_1 + DFSClient_NONMAPREDUCE_526346936_1 127.0.0.1 true - aagarwal + plamenjeliazkov supergroup 420 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 47 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 38 @@ -549,8 +549,8 @@ 0 /file_concat_0 1 - 1412114468370 - 1412114468351 + 1421131349032 + 1421131349001 512 @@ -571,7 +571,7 @@ 1006 - aagarwal + plamenjeliazkov supergroup 420 @@ -585,19 +585,19 @@ 16391 /file_concat_1 1 - 1412114468373 - 1412114468373 + 1421131349036 + 1421131349036 512 - DFSClient_NONMAPREDUCE_1474796918_1 + DFSClient_NONMAPREDUCE_526346936_1 127.0.0.1 true - aagarwal + plamenjeliazkov supergroup 420 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 59 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 47 @@ -702,8 +702,8 @@ 0 /file_concat_1 1 - 1412114468392 - 1412114468373 + 1421131349060 + 1421131349036 512 @@ -724,7 +724,7 @@ 1009 - aagarwal + plamenjeliazkov supergroup 420 @@ -736,76 +736,57 @@ 57 0 /file_concat_target - 1412114468395 + 1421131349064 /file_concat_0 /file_concat_1 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 70 - - - - OP_SYMLINK - - 58 - 0 - 16392 - /file_symlink - /file_concat_target - 1412114468398 - 1412114468398 - - aagarwal - supergroup - 511 - - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 71 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 55 OP_ADD - 59 + 58 0 - 16393 - /hard-lease-recovery-test + 16392 + /file_create 1 - 1412114468401 - 1412114468401 + 1421131349068 + 1421131349068 512 - DFSClient_NONMAPREDUCE_1474796918_1 + DFSClient_NONMAPREDUCE_526346936_1 127.0.0.1 true - aagarwal + plamenjeliazkov supergroup 420 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 72 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 57 OP_ALLOCATE_BLOCK_ID - 60 + 59 1073741834 OP_SET_GENSTAMP_V2 - 61 + 60 1010 OP_ADD_BLOCK - 62 - /hard-lease-recovery-test + 61 + /file_create 1073741834 0 @@ -816,15 +797,160 @@ - OP_UPDATE_BLOCKS + OP_ALLOCATE_BLOCK_ID + + 62 + 1073741835 + + + + OP_SET_GENSTAMP_V2 63 - /hard-lease-recovery-test + 1011 + + + + OP_ADD_BLOCK + + 64 + /file_create 1073741834 - 11 + 512 1010 + + 1073741835 + 0 + 1011 + + + -2 + + + + OP_CLOSE + + 65 + 0 + 0 + /file_create + 1 + 1421131349085 + 1421131349068 + 512 + + + false + + 1073741834 + 512 + 1010 + + + 1073741835 + 512 + 1011 + + + plamenjeliazkov + supergroup + 420 + + + + + OP_TRUNCATE + + 66 + /file_create + DFSClient_NONMAPREDUCE_526346936_1 + 127.0.0.1 + 512 + 1421131349088 + + + + OP_SYMLINK + + 67 + 0 + 16393 + /file_symlink + /file_concat_target + 1421131349095 + 1421131349095 + + plamenjeliazkov + supergroup + 511 + + 99bcddc1-3460-4630-9904-6c7ca5811945 + 64 + + + + OP_ADD + + 68 + 0 + 16394 + /hard-lease-recovery-test + 1 + 1421131349098 + 1421131349098 + 512 + DFSClient_NONMAPREDUCE_526346936_1 + 127.0.0.1 + true + + plamenjeliazkov + supergroup + 420 + + 99bcddc1-3460-4630-9904-6c7ca5811945 + 65 + + + + OP_ALLOCATE_BLOCK_ID + + 69 + 1073741836 + + + + OP_SET_GENSTAMP_V2 + + 70 + 1012 + + + + OP_ADD_BLOCK + + 71 + /hard-lease-recovery-test + + 1073741836 + 0 + 1012 + + + -2 + + + + OP_UPDATE_BLOCKS + + 72 + /hard-lease-recovery-test + + 1073741836 + 11 + 1012 + -2 @@ -832,15 +958,15 @@ OP_SET_GENSTAMP_V2 - 64 - 1011 + 73 + 1013 OP_REASSIGN_LEASE - 65 - DFSClient_NONMAPREDUCE_1474796918_1 + 74 + DFSClient_NONMAPREDUCE_526346936_1 /hard-lease-recovery-test HDFS_NameNode @@ -848,24 +974,24 @@ OP_CLOSE - 66 + 75 0 0 /hard-lease-recovery-test 1 - 1412114470807 - 1412114468401 + 1421131351230 + 1421131349098 
512 false - 1073741834 + 1073741836 11 - 1011 + 1013 - aagarwal + plamenjeliazkov supergroup 420 @@ -874,72 +1000,72 @@ OP_ADD_CACHE_POOL - 67 + 76 pool1 - aagarwal + plamenjeliazkov staff 493 9223372036854775807 2305843009213693951 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 79 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 72 OP_MODIFY_CACHE_POOL - 68 + 77 pool1 99 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 80 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 73 OP_ADD_CACHE_DIRECTIVE - 69 + 78 1 /path 1 pool1 - 2305844421328165416 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 81 + 2305844430345046085 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 74 OP_MODIFY_CACHE_DIRECTIVE - 70 + 79 1 2 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 82 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 75 OP_REMOVE_CACHE_DIRECTIVE - 71 + 80 1 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 83 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 76 OP_REMOVE_CACHE_POOL - 72 + 81 pool1 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 84 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 77 OP_SET_ACL - 73 + 82 /file_concat_target ACCESS @@ -972,62 +1098,62 @@ OP_SET_XATTR - 74 + 83 /file_concat_target USER a1 0x313233 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 86 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 79 OP_SET_XATTR - 75 + 84 /file_concat_target USER a2 0x373839 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 87 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 80 OP_REMOVE_XATTR - 76 + 85 /file_concat_target USER a2 - 0a28b871-f75a-46a4-80e0-fe41cbb6b034 - 88 + 99bcddc1-3460-4630-9904-6c7ca5811945 + 81 OP_ROLLING_UPGRADE_START - 77 - 1412114471510 + 86 + 1421131352186 OP_ROLLING_UPGRADE_FINALIZE - 78 - 1412114471510 + 87 + 1421131352186 OP_END_LOG_SEGMENT - 79 + 88
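For readers trying out the new API, a minimal usage sketch follows; it is not part of the patch itself. It assumes a cluster built with this change and fs.defaultFS pointing at HDFS, writes a small file, truncates it to a length that is not on a block boundary, and polls until the last-block recovery finishes, mirroring TestFileTruncate#checkBlockRecovery above. The class name, file path, and sleep interval are illustrative only.

// Hypothetical client-side sketch (not part of this patch).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TruncateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // fs.defaultFS must point at an HDFS cluster
    Path file = new Path("/truncate-example");
    DistributedFileSystem dfs =
        (DistributedFileSystem) file.getFileSystem(conf);

    try (FSDataOutputStream out = dfs.create(file, true)) {
      out.write(new byte[1024]);               // one small partial block
    }

    // truncate() returns true only when the client does not need to wait for
    // block recovery; a mid-block new length triggers recovery of the last block.
    boolean done = dfs.truncate(file, 100L);
    while (!done) {
      Thread.sleep(100L);
      done = !dfs.getClient()
          .getLocatedBlocks(file.toString(), 0, Long.MAX_VALUE)
          .isUnderConstruction();
    }
    System.out.println("truncated length = " + dfs.getFileStatus(file).getLen());
  }
}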