HDFS-9516. Truncate file fails with data dirs on multiple disks. Contributed by Plamen Jeliazkov.

This commit is contained in:
Plamen Jeliazkov 2015-12-15 00:10:33 -08:00 committed by Konstantin V Shvachko
parent be50df687b
commit 15124c86d8
2 changed files with 12 additions and 5 deletions

View File

@@ -30,6 +30,9 @@ Release 2.9.0 - UNRELEASED
   BUG FIXES
+
+    HDFS-9516. Truncate file fails with data dirs on multiple disks.
+    (Plamen Jeliazkov via shv)
 Release 2.8.0 - UNRELEASED
   INCOMPATIBLE CHANGES

View File

@@ -2505,8 +2505,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     } else {
       // Copying block to a new block with new blockId.
       // Not truncating original block.
+      FsVolumeSpi volume = rur.getVolume();
+      String blockPath = blockFile.getAbsolutePath();
+      String volumePath = volume.getBasePath();
+      assert blockPath.startsWith(volumePath) :
+          "New block file: " + blockPath + " must be on " +
+          "same volume as recovery replica: " + volumePath;
       ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
-          newBlockId, recoveryId, rur.getVolume(), blockFile.getParentFile(),
+          newBlockId, recoveryId, volume, blockFile.getParentFile(),
           newlength);
       newReplicaInfo.setNumBytes(newlength);
       volumeMap.add(bpid, newReplicaInfo);
@@ -2522,10 +2528,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
       throws IOException {
     String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
-    FsVolumeReference v = volumes.getNextVolume(
-        replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
-    final File tmpDir = ((FsVolumeImpl) v.getVolume())
-        .getBlockPoolSlice(bpid).getTmpDir();
+    FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
+    final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
     final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
     final File dstBlockFile = new File(destDir, blockFileName);
     final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);