HDFS-2864. Remove some redundant methods and the constant METADATA_VERSION from FSDataset.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1238969 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze    Date: 2012-02-01 07:46:50 +00:00
parent d1f805f942
commit dbbfaebb71
14 changed files with 65 additions and 61 deletions
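
In short, the change makes BlockMetadataHeader.VERSION the canonical metadata version constant (replacing FSDataset.METADATA_VERSION) and adds an FSDatasetInterface.contains(ExtendedBlock) query in place of direct block-file lookups. A minimal sketch of the version check as it reads after this commit; the header, blk, and LOG variables are hypothetical stand-ins for the caller's surrounding code:

    // After HDFS-2864 the metadata version constant lives on BlockMetadataHeader.
    // 'header', 'blk' and 'LOG' are placeholders for the caller's context.
    short version = header.getVersion();
    if (version != BlockMetadataHeader.VERSION) {
      LOG.warn("Wrong version (" + version + ") for metadata file for "
          + blk + " ignoring ...");
    }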


@@ -306,6 +306,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2826. Add test case for HDFS-1476 (safemode can initialize
     replication queues before exiting) (todd)
 
+    HDFS-2864. Remove some redundant methods and the constant METADATA_VERSION
+    from FSDataset. (szetszwo)
+
   BUG FIXES
 
     HDFS-2541. For a sufficiently large value of blocks, the DN Scanner


@@ -31,13 +31,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
@@ -183,7 +182,7 @@ static BlockReaderLocal newBlockReader(Configuration conf, String file,
     BlockMetadataHeader header = BlockMetadataHeader
         .readHeader(new DataInputStream(checksumIn));
     short version = header.getVersion();
-    if (version != FSDataset.METADATA_VERSION) {
+    if (version != BlockMetadataHeader.VERSION) {
       LOG.warn("Wrong version (" + version + ") for metadata file for "
           + blk + " ignoring ...");
     }


@@ -42,7 +42,7 @@
 @InterfaceStability.Evolving
 public class BlockMetadataHeader {
 
-  static final short METADATA_VERSION = FSDataset.METADATA_VERSION;
+  public static final short VERSION = 1;
 
   /**
    * Header includes everything except the checksum(s) themselves.
@@ -138,7 +138,7 @@ private static void writeHeader(DataOutputStream out,
    */
   static void writeHeader(DataOutputStream out, DataChecksum checksum)
       throws IOException {
-    writeHeader(out, new BlockMetadataHeader(METADATA_VERSION, checksum));
+    writeHeader(out, new BlockMetadataHeader(VERSION, checksum));
   }
 
   /**


@@ -425,9 +425,8 @@ private void verifyBlock(ExtendedBlock block) {
       updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
 
       // If the block does not exists anymore, then its not an error
-      if ( dataset.getFile(block.getBlockPoolId(), block.getLocalBlock()) == null ) {
-        LOG.info("Verification failed for " + block + ". Its ok since " +
-        "it not in datanode dataset anymore.");
+      if (!dataset.contains(block)) {
+        LOG.info(block + " is no longer in the dataset.");
         deleteBlock(block.getLocalBlock());
         return;
       }


@@ -226,7 +226,7 @@ class BlockSender implements java.io.Closeable {
       // read and handle the common header here. For now just a version
       BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         LOG.warn("Wrong version (" + version + ") for metadata file for "
             + block + " ignoring ...");
       }


@@ -470,7 +470,7 @@ private long validateIntegrity(File blockFile, long genStamp) {
       // read and handle the common header here. For now just a version
       BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         DataNode.LOG.warn("Wrong version (" + version + ") for metadata file "
             + metaFile + " ignoring ...");
       }
@@ -945,8 +945,7 @@ private void shutdown() {
   //////////////////////////////////////////////////////
 
   //Find better place?
-  public static final String METADATA_EXTENSION = ".meta";
-  public static final short METADATA_VERSION = 1;
+  static final String METADATA_EXTENSION = ".meta";
   static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
 
   private static boolean isUnlinkTmpFile(File f) {
@@ -1031,15 +1030,10 @@ private static long parseGenerationStamp(File blockFile, File metaFile
     }
   }
 
-  /** Return the block file for the given ID */
-  public File findBlockFile(String bpid, long blockId) {
-    return getFile(bpid, blockId);
-  }
-
   @Override // FSDatasetInterface
   public synchronized Block getStoredBlock(String bpid, long blkid)
       throws IOException {
-    File blockfile = findBlockFile(bpid, blkid);
+    File blockfile = getFile(bpid, blkid);
     if (blockfile == null) {
       return null;
     }
@@ -1259,8 +1253,7 @@ public File getBlockFile(ExtendedBlock b) throws IOException {
   /**
    * Get File name for a given block.
    */
-  public File getBlockFile(String bpid, Block b)
-      throws IOException {
+  File getBlockFile(String bpid, Block b) throws IOException {
     File f = validateBlockFile(bpid, b);
     if(f == null) {
       if (DataNode.LOG.isDebugEnabled()) {
@@ -1291,7 +1284,10 @@ public InputStream getBlockInputStream(ExtendedBlock b)
    */
   private File getBlockFileNoExistsCheck(ExtendedBlock b)
       throws IOException {
-    File f = getFile(b.getBlockPoolId(), b.getLocalBlock());
+    final File f;
+    synchronized(this) {
+      f = getFile(b.getBlockPoolId(), b.getLocalBlock().getBlockId());
+    }
     if (f == null) {
       throw new IOException("Block " + b + " is not valid");
     }
@@ -2021,7 +2017,10 @@ private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
    */
   File validateBlockFile(String bpid, Block b) {
     //Should we check for metadata file too?
-    File f = getFile(bpid, b);
+    final File f;
+    synchronized(this) {
+      f = getFile(bpid, b.getBlockId());
+    }
 
     if(f != null ) {
       if(f.exists())
@@ -2071,7 +2070,7 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
       File f = null;
       FSVolume v;
       synchronized (this) {
-        f = getFile(bpid, invalidBlks[i]);
+        f = getFile(bpid, invalidBlks[i].getBlockId());
         ReplicaInfo dinfo = volumeMap.get(bpid, invalidBlks[i]);
         if (dinfo == null ||
             dinfo.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
@@ -2130,11 +2129,10 @@ public void notifyNamenodeDeletedBlock(ExtendedBlock block){
     datanode.notifyNamenodeDeletedBlock(block);
   }
 
-  /**
-   * Turn the block identifier into a filename; ignore generation stamp!!!
-   */
-  public synchronized File getFile(String bpid, Block b) {
-    return getFile(bpid, b.getBlockId());
+  @Override // {@link FSDatasetInterface}
+  public synchronized boolean contains(final ExtendedBlock block) {
+    final long blockId = block.getLocalBlock().getBlockId();
+    return getFile(block.getBlockPoolId(), blockId) != null;
   }
 
   /**
@@ -2143,7 +2141,7 @@ public synchronized File getFile(String bpid, Block b) {
    * @param blockId a block's id
    * @return on disk data file path; null if the replica does not exist
    */
-  private File getFile(String bpid, long blockId) {
+  File getFile(final String bpid, final long blockId) {
     ReplicaInfo info = volumeMap.get(bpid, blockId);
     if (info != null) {
       return info.getBlockFile();


@@ -19,7 +19,6 @@
 import java.io.Closeable;
-import java.io.File;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -27,13 +26,13 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -303,6 +302,9 @@ public void recoverClose(ExtendedBlock b,
    */
   public BlockListAsLongs getBlockReport(String bpid);
 
+  /** Does the dataset contain the block? */
+  public boolean contains(ExtendedBlock block);
+
   /**
    * Is the block valid?
    * @param b
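
The effect on callers can be seen in the scanner change above: instead of fetching the block file and comparing it to null, they ask the dataset directly. A hedged sketch of that pattern; dataset and block are placeholders for whatever the caller already has in scope:

    // contains() only reports whether the dataset still knows the replica;
    // isValidBlock() additionally requires it to be finalized (compare the
    // SimulatedFSDataset change below). 'dataset' and 'block' are placeholders.
    if (!dataset.contains(block)) {
      // replica disappeared from the dataset; nothing left to verify
      return;
    }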


@@ -45,6 +45,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
@@ -1165,7 +1166,7 @@ static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
     for(int i = 0; i < blocks.length; i++) {
       FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
       for(Block b : blocks[i]) {
-        files.add(ds.getBlockFile(poolId, b));
+        files.add(DataNodeTestUtils.getBlockFile(ds, poolId, b.getBlockId()));
       }
     }
     return files;


@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -139,7 +140,8 @@ public void testCopyOnWrite() throws IOException {
     //
     for (int i = 0; i < blocks.size(); i = i + 2) {
       ExtendedBlock b = blocks.get(i).getBlock();
-      File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
+      final File f = DataNodeTestUtils.getBlockFile(dataset,
+          b.getBlockPoolId(), b.getLocalBlock().getBlockId());
       File link = new File(f.toString() + ".link");
       System.out.println("Creating hardlink for File " + f + " to " + link);
       HardLink.createHardLink(f, link);


@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -831,7 +832,8 @@ public void testLeaseExpireHardLimit() throws Exception {
       FSDataset dataset = (FSDataset)datanode.data;
       ExtendedBlock blk = locatedblock.getBlock();
       Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
-      File blockfile = dataset.findBlockFile(blk.getBlockPoolId(), b.getBlockId());
+      final File blockfile = DataNodeTestUtils.getBlockFile(dataset,
+          blk.getBlockPoolId(), b.getBlockId());
       System.out.println("blockfile=" + blockfile);
       if (blockfile != null) {
         BufferedReader in = new BufferedReader(new FileReader(blockfile));


@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -38,4 +39,7 @@ public class DataNodeTestUtils {
     return dn.getDNRegistrationForBP(bpid);
   }
 
+  public static File getBlockFile(FSDataset fsdataset, String bpid, long bid) {
+    return fsdataset.getFile(bpid, bid);
+  }
 }
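
With FSDataset.getFile(String, long) now package-private and the old public lookup methods removed, tests outside the datanode package reach block files only through this helper. A rough usage sketch; the cluster, poolId, and blk fixtures are hypothetical test scaffolding, not part of this commit:

    // Hypothetical test snippet: resolve a replica's on-disk file via the
    // new DataNodeTestUtils helper instead of calling FSDataset directly.
    FSDataset ds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();
    File blockFile = DataNodeTestUtils.getBlockFile(ds, poolId, blk.getBlockId());
    assertNotNull(blockFile);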


@@ -81,8 +81,8 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
         CHECKSUM_NULL, 16*1024 );
     byte[] nullCrcHeader = checksum.getHeader();
     nullCrcFileData = new byte[2 + nullCrcHeader.length];
-    nullCrcFileData[0] = (byte) ((FSDataset.METADATA_VERSION >>> 8) & 0xff);
-    nullCrcFileData[1] = (byte) (FSDataset.METADATA_VERSION & 0xff);
+    nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff);
+    nullCrcFileData[1] = (byte) (BlockMetadataHeader.VERSION & 0xff);
     for (int i = 0; i < nullCrcHeader.length; i++) {
       nullCrcFileData[i+2] = nullCrcHeader[i];
     }
@@ -390,9 +390,7 @@ public synchronized void injectBlocks(String bpid,
       Iterable<Block> injectBlocks) throws IOException {
     ExtendedBlock blk = new ExtendedBlock();
     if (injectBlocks != null) {
-      int numInjectedBlocks = 0;
       for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
-        numInjectedBlocks++;
         if (b == null) {
           throw new NullPointerException("Null blocks in block list");
         }
@@ -555,31 +553,27 @@ public synchronized void invalidate(String bpid, Block[] invalidBlks)
     }
   }
 
+  private BInfo getBInfo(final ExtendedBlock b) {
+    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
+    return map == null? null: map.get(b.getLocalBlock());
+  }
+
+  @Override // {@link FSDatasetInterface}
+  public boolean contains(ExtendedBlock block) {
+    return getBInfo(block) != null;
+  }
+
   @Override // FSDatasetInterface
   public synchronized boolean isValidBlock(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && binfo.isFinalized();
   }
 
   /* check if a block is created but not finalized */
   @Override
   public synchronized boolean isValidRbw(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return !binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && !binfo.isFinalized();
   }
 
   @Override


@@ -352,7 +352,7 @@ private void verifyAddition(long blockId, long genStamp, long size) {
     // Added block has the same file as the one created by the test
     File file = new File(getBlockFile(blockId));
-    assertEquals(file.getName(), fds.findBlockFile(bpid, blockId).getName());
+    assertEquals(file.getName(), fds.getFile(bpid, blockId).getName());
 
     // Generation stamp is same as that of created file
     assertEquals(genStamp, replicainfo.getGenerationStamp());


@@ -101,7 +101,7 @@ public void testGetMetaData() throws IOException {
       InputStream metaInput = fsdataset.getMetaDataInputStream(b);
       DataInputStream metaDataInput = new DataInputStream(metaInput);
       short version = metaDataInput.readShort();
-      assertEquals(FSDataset.METADATA_VERSION, version);
+      assertEquals(BlockMetadataHeader.VERSION, version);
       DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
       assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
       assertEquals(0, checksum.getChecksumSize());