HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov.
parent 116a7f1a16
commit f6bdcd938e
@@ -833,6 +833,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not
     idempotent (Tsz Wo Nicholas Sze via brandonli)
 
+    HDFS-7903. Cannot recover block after truncate and delete snapshot.
+    (Plamen Jeliazkov via shv)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -125,9 +128,19 @@ public class FileDiffList extends
         continue;
       break;
     }
-    // Collect the remaining blocks of the file
-    while(i < removedBlocks.length) {
-      collectedBlocks.addDeleteBlock(removedBlocks[i++]);
+    // Check if last block is part of truncate recovery
+    BlockInfoContiguous lastBlock = file.getLastBlock();
+    Block dontRemoveBlock = null;
+    if(lastBlock != null && lastBlock.getBlockUCState().equals(
+        HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
+      dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+          .getTruncateBlock();
+    }
+    // Collect the remaining blocks of the file, ignoring truncate block
+    for(;i < removedBlocks.length; i++) {
+      if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
+        collectedBlocks.addDeleteBlock(removedBlocks[i]);
+      }
     }
   }
 }
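The hunk above is the core of the fix: when a file's last block is still UNDER_RECOVERY because of an in-progress truncate, the block that recovery needs (the truncate block) must not be added to the delete list while the snapshot is removed, otherwise the DataNodes purge it and recovery can never finish. Below is a minimal, self-contained sketch of just that filtering step; it uses plain long block ids and a hypothetical collectDeletableBlocks helper rather than the NameNode's BlockInfoContiguous and BlocksMapUpdateInfo types, so it illustrates the control flow, not the real API.

import java.util.ArrayList;
import java.util.List;

/**
 * Illustration only: mirrors the control flow of the FileDiffList change.
 * The real code works with BlockInfoContiguous objects and obtains the block
 * to keep from BlockInfoContiguousUnderConstruction.getTruncateBlock().
 */
public class TruncateBlockFilterSketch {

  /**
   * Collect blocks of a deleted snapshot for removal, skipping the block
   * that an in-progress truncate recovery still needs.
   *
   * @param removedBlocks   blocks referenced only by the deleted snapshot
   * @param truncateBlockId id of the block under truncate recovery, or null
   *                        if the file's last block is not being recovered
   */
  static List<Long> collectDeletableBlocks(long[] removedBlocks,
                                           Long truncateBlockId) {
    List<Long> toDelete = new ArrayList<>();
    for (long block : removedBlocks) {
      // Before HDFS-7903 every removed block was collected unconditionally,
      // which also scheduled the recovering truncate block for deletion.
      if (truncateBlockId == null || block != truncateBlockId) {
        toDelete.add(block);
      }
    }
    return toDelete;
  }

  public static void main(String[] args) {
    long[] snapshotOnlyBlocks = {1001L, 1002L, 1003L};
    Long recoveringTruncateBlock = 1003L; // last block is UNDER_RECOVERY

    // Prints [1001, 1002]: the truncate block survives so block recovery
    // can finish and the truncated file stays readable.
    System.out.println(
        collectDeletableBlocks(snapshotOnlyBlocks, recoveringTruncateBlock));
  }
}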
@@ -178,6 +178,36 @@ public class TestFileTruncate {
     fs.delete(dir, true);
   }
 
+  /** Truncate a snapshotted file, delete the snapshot, and verify recovery. */
+  @Test
+  public void testSnapshotTruncateThenDeleteSnapshot() throws IOException {
+    Path dir = new Path("/testSnapshotTruncateThenDeleteSnapshot");
+    fs.mkdirs(dir);
+    fs.allowSnapshot(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[BLOCK_SIZE];
+    DFSUtil.getRandom().nextBytes(data);
+    writeContents(data, data.length, p);
+    final String snapshot = "s0";
+    fs.createSnapshot(dir, snapshot);
+    Block lastBlock = getLocatedBlocks(p).getLastLocatedBlock()
+        .getBlock().getLocalBlock();
+    final int newLength = data.length - 1;
+    assert newLength % BLOCK_SIZE != 0 :
+        " newLength must not be multiple of BLOCK_SIZE";
+    final boolean isReady = fs.truncate(p, newLength);
+    LOG.info("newLength=" + newLength + ", isReady=" + isReady);
+    assertEquals("File must be closed for truncating at the block boundary",
+        isReady, newLength % BLOCK_SIZE == 0);
+    fs.deleteSnapshot(dir, snapshot);
+    if (!isReady) {
+      checkBlockRecovery(p);
+    }
+    checkFullFile(p, newLength, data);
+    assertBlockNotPresent(lastBlock);
+    fs.delete(dir, true);
+  }
+
   /**
    * Truncate files and then run other operations such as
    * rename, set replication, set permission, etc.
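The new test drives this scenario inside the MiniDFSCluster that TestFileTruncate starts. For reference, the same sequence can also be reproduced against a running cluster with the public client API. The sketch below is only an outline under assumptions (fs.defaultFS pointing at an HDFS 2.7.0+ cluster, arbitrary path names, data size, and polling interval) and is not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Sketch: truncate a snapshotted file inside a block, delete the snapshot
 * while the last block is still being recovered, then wait for recovery.
 */
public class TruncateThenDeleteSnapshotRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS is an hdfs:// URI for a 2.7.0+ cluster.
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);

    Path dir = new Path("/truncateRepro");
    Path file = new Path(dir, "file");
    fs.mkdirs(dir);
    fs.allowSnapshot(dir);

    // Write some data, then snapshot the directory.
    byte[] data = new byte[64 * 1024];
    try (FSDataOutputStream out = fs.create(file)) {
      out.write(data);
    }
    fs.createSnapshot(dir, "s0");

    // Truncate in the middle of the last block; truncate() returns false
    // when block recovery is needed before the new length becomes final.
    boolean isReady = fs.truncate(file, data.length - 1);

    // Deleting the snapshot while recovery is in flight is the case
    // HDFS-7903 fixes: the recovering truncate block must not be deleted.
    fs.deleteSnapshot(dir, "s0");

    while (!isReady && !fs.isFileClosed(file)) {
      Thread.sleep(100); // wait for block recovery to complete
    }
    System.out.println("file length = " + fs.getFileStatus(file).getLen());
  }
}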