HDFS-9490. MiniDFSCluster should change block generation stamp via FsDatasetTestUtils. (Tony Wu via lei)
parent 3fa33b5c2c
commit 0ac8fb4b33
CHANGES.txt
@@ -1710,6 +1710,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo
     instead of Block. (Mingliang Liu via jing9)
 
+    HDFS-9490. MiniDFSCluster should change block generation stamp via
+    FsDatasetTestUtils. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
MiniDFSCluster.java
@@ -2141,12 +2141,10 @@ public class MiniDFSCluster {
     getMaterializedReplica(i, blk).truncateMeta(newSize);
   }
 
-  public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
+  public void changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
       long newGenStamp) throws IOException {
-    File blockFile = getBlockFile(dnIndex, blk);
-    File metaFile = FsDatasetUtil.findMetaFile(blockFile);
-    return metaFile.renameTo(new File(DatanodeUtil.getMetaName(
-        blockFile.getAbsolutePath(), newGenStamp)));
+    getFsDatasetTestUtils(dnIndex)
+        .changeStoredGenerationStamp(blk, newGenStamp);
   }
 
   /*
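With the file manipulation delegated to FsDatasetTestUtils, the cluster method becomes a thin shim, and callers handle failure via IOException rather than a boolean result. A minimal call-site sketch (assuming, as in the test hunk further below, a running MiniDFSCluster `cluster`, a FileSystem `fs`, and a test file `filePath`):

    // Rewind the replica on datanode 0 to an older generation stamp.
    // A failed rename now surfaces as an IOException instead of `false`.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
    cluster.changeGenStampOfBlock(0, block, 900);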
FsDatasetTestUtils.java
@@ -242,4 +242,13 @@ public interface FsDatasetTestUtils {
    * Get the persistently stored generation stamp.
    */
  long getStoredGenerationStamp(ExtendedBlock block) throws IOException;
+
+  /**
+   * Change the persistently stored generation stamp.
+   * @param block the block whose generation stamp will be changed
+   * @param newGenStamp the new generation stamp
+   * @throws IOException
+   */
+  void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
+      throws IOException;
 }
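The new method pairs with the existing getStoredGenerationStamp, so a test can read, rewind, and verify the persisted stamp without assuming a file-based dataset. A sketch of that round trip (hedged: it assumes the getFsDatasetTestUtils(int) accessor seen in the MiniDFSCluster hunk above is reachable from test code):

    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
    long oldGS = utils.getStoredGenerationStamp(block);
    utils.changeStoredGenerationStamp(block, oldGS - 100);
    assertEquals(oldGS - 100, utils.getStoredGenerationStamp(block));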
FsDatasetImplTestUtils.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
@@ -47,6 +48,7 @@ import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
 import java.util.Random;
 
 /**
@@ -363,4 +365,16 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     File[] files = FileUtil.listFiles(dir);
     return FsDatasetUtil.getGenerationStampFromFile(files, f);
   }
+
+  @Override
+  public void changeStoredGenerationStamp(
+      ExtendedBlock block, long newGenStamp) throws IOException {
+    File blockFile =
+        dataset.getBlockFile(block.getBlockPoolId(), block.getBlockId());
+    File metaFile = FsDatasetUtil.findMetaFile(blockFile);
+    File newMetaFile = new File(
+        DatanodeUtil.getMetaName(blockFile.getAbsolutePath(), newGenStamp));
+    Files.move(metaFile.toPath(), newMetaFile.toPath(),
+        StandardCopyOption.ATOMIC_MOVE);
+  }
 }
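The implementation also swaps File#renameTo, which reports failure only as a false return, for Files.move with ATOMIC_MOVE, which fails loudly with a descriptive IOException. A standalone, runnable illustration of that difference (hypothetical meta-file names, not taken from the patch):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class MetaRenameSketch {
      public static void main(String[] args) throws IOException {
        Path meta = Paths.get("blk_1073741825_1000.meta");    // hypothetical
        Path newMeta = Paths.get("blk_1073741825_900.meta");  // hypothetical
        Files.createFile(meta);
        // Atomic within one filesystem; on error this throws (e.g.
        // NoSuchFileException) rather than silently returning false.
        Files.move(meta, newMeta, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("renamed to " + newMeta);
      }
    }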
TestPendingCorruptDnMessages.java
@@ -68,7 +68,7 @@ public class TestPendingCorruptDnMessages {
     // Change the gen stamp of the block on datanode to go back in time (gen
     // stamps start at 1000)
     ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
-    assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
+    cluster.changeGenStampOfBlock(0, block, 900);
 
     // Run directory scanner to update Datanode's volumeMap
     DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
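Note the matching cleanup here: since changeGenStampOfBlock no longer returns boolean, the assertTrue wrapper goes away, and a failed stamp change fails the test with the underlying IOException rather than a bare assertion error.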