HDFS-12884. BlockUnderConstructionFeature.truncateBlock should be of type BlockInfo. Contributed by chencan.

This commit is contained in:
Konstantin V Shvachko 2018-03-21 16:46:03 -07:00
parent 5aa7052e31
commit 8d898ab25f
5 changed files with 10 additions and 12 deletions

View File

@@ -60,7 +60,7 @@ public class BlockUnderConstructionFeature {
/**
* The block source to use in the event of copy-on-write truncate.
*/
private Block truncateBlock;
private BlockInfo truncateBlock;
public BlockUnderConstructionFeature(Block blk,
BlockUCState state, DatanodeStorageInfo[] targets, BlockType blockType) {
@@ -193,11 +193,11 @@ public class BlockUnderConstructionFeature {
}
/** Get recover block */
public Block getTruncateBlock() {
public BlockInfo getTruncateBlock() {
return truncateBlock;
}
public void setTruncateBlock(Block recoveryBlock) {
public void setTruncateBlock(BlockInfo recoveryBlock) {
this.truncateBlock = recoveryBlock;
}

View File

@@ -104,7 +104,7 @@ final class FSDirTruncateOp {
final BlockInfo last = file.getLastBlock();
if (last != null && last.getBlockUCState()
== BlockUCState.UNDER_RECOVERY) {
final Block truncatedBlock = last.getUnderConstructionFeature()
final BlockInfo truncatedBlock = last.getUnderConstructionFeature()
.getTruncateBlock();
if (truncatedBlock != null) {
final long truncateLength = file.computeFileSize(false, false)
@@ -259,7 +259,8 @@ final class FSDirTruncateOp {
oldBlock = file.getLastBlock();
assert !oldBlock.isComplete() : "oldBlock should be under construction";
BlockUnderConstructionFeature uc = oldBlock.getUnderConstructionFeature();
uc.setTruncateBlock(new Block(oldBlock));
uc.setTruncateBlock(new BlockInfoContiguous(oldBlock,
oldBlock.getReplication()));
uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp());
truncatedBlockUC = oldBlock;

View File

@@ -3376,7 +3376,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
BlockUnderConstructionFeature uc =
lastBlock.getUnderConstructionFeature();
// determine if last block was intended to be truncated
Block recoveryBlock = uc.getTruncateBlock();
BlockInfo recoveryBlock = uc.getTruncateBlock();
boolean truncateRecovery = recoveryBlock != null;
boolean copyOnTruncate = truncateRecovery &&
recoveryBlock.getBlockId() != lastBlock.getBlockId();

View File

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -1054,12 +1053,11 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
if(uc == null) {
return;
}
Block truncateBlock = uc.getTruncateBlock();
BlockInfo truncateBlock = uc.getTruncateBlock();
if(truncateBlock == null || truncateBlock.equals(toDelete)) {
return;
}
assert truncateBlock instanceof BlockInfo : "should be BlockInfo";
addDeleteBlock((BlockInfo) truncateBlock);
addDeleteBlock(truncateBlock);
}
public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {

View File

@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -129,7 +128,7 @@ public class FileDiffList extends
}
// Check if last block is part of truncate recovery
BlockInfo lastBlock = file.getLastBlock();
Block dontRemoveBlock = null;
BlockInfo dontRemoveBlock = null;
if (lastBlock != null && lastBlock.getBlockUCState().equals(
HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
dontRemoveBlock = lastBlock.getUnderConstructionFeature()