HDFS-2864. Remove some redundant methods and the constant METADATA_VERSION from FSDataset.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1238969 13f79535-47bb-0310-9956-ffa450edef68
parent d1f805f942
commit dbbfaebb71
CHANGES.txt
@@ -306,6 +306,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2826. Add test case for HDFS-1476 (safemode can initialize
     replication queues before exiting) (todd)
 
+    HDFS-2864. Remove some redundant methods and the constant METADATA_VERSION
+    from FSDataset. (szetszwo)
+
   BUG FIXES
 
     HDFS-2541. For a sufficiently large value of blocks, the DN Scanner
BlockReaderLocal.java
@@ -31,13 +31,12 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;

@@ -183,7 +182,7 @@ class BlockReaderLocal implements BlockReader {
       BlockMetadataHeader header = BlockMetadataHeader
           .readHeader(new DataInputStream(checksumIn));
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         LOG.warn("Wrong version (" + version + ") for metadata file for "
             + blk + " ignoring ...");
       }
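Note: the version check above is the same pattern repeated in BlockSender and FSDataset below; all three call sites now compare against BlockMetadataHeader.VERSION instead of the removed FSDataset.METADATA_VERSION. A minimal standalone sketch of that check, with the file handling here being illustrative rather than part of the commit:

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;

    class MetaVersionCheck {
      // Read the .meta header and warn on a version mismatch, mirroring
      // the three call sites touched by this commit.
      static void check(String metaPath) throws IOException {
        DataInputStream in = new DataInputStream(new FileInputStream(metaPath));
        try {
          BlockMetadataHeader header = BlockMetadataHeader.readHeader(in);
          short version = header.getVersion();
          if (version != BlockMetadataHeader.VERSION) {
            System.err.println("Wrong version (" + version + ") for " + metaPath);
          }
        } finally {
          in.close();
        }
      }
    }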
BlockMetadataHeader.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Evolving
 public class BlockMetadataHeader {
 
-  static final short METADATA_VERSION = FSDataset.METADATA_VERSION;
+  public static final short VERSION = 1;
 
   /**
    * Header includes everything except the checksum(s) themselves.

@@ -138,7 +138,7 @@ public class BlockMetadataHeader {
    */
   static void writeHeader(DataOutputStream out, DataChecksum checksum)
       throws IOException {
-    writeHeader(out, new BlockMetadataHeader(METADATA_VERSION, checksum));
+    writeHeader(out, new BlockMetadataHeader(VERSION, checksum));
   }
 
   /**
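Moving the constant here makes BlockMetadataHeader self-contained: a .meta file starts with the two-byte VERSION followed by the serialized DataChecksum header. A hedged write/read roundtrip sketch, placed in the same package since writeHeader is package-private; the checksum parameters are an arbitrary choice, not from the commit:

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.util.DataChecksum;

    class MetaHeaderRoundTrip {
      static void roundTrip(File metaFile) throws IOException {
        // CRC32 over 512-byte chunks; any supported type/size would do.
        DataChecksum checksum = DataChecksum.newDataChecksum(
            DataChecksum.CHECKSUM_CRC32, 512);
        DataOutputStream out = new DataOutputStream(new FileOutputStream(metaFile));
        try {
          BlockMetadataHeader.writeHeader(out, checksum); // VERSION, then checksum header
        } finally {
          out.close();
        }
        DataInputStream in = new DataInputStream(new FileInputStream(metaFile));
        try {
          BlockMetadataHeader header = BlockMetadataHeader.readHeader(in);
          assert header.getVersion() == BlockMetadataHeader.VERSION;
        } finally {
          in.close();
        }
      }
    }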
BlockPoolSliceScanner.java
@@ -425,9 +425,8 @@ class BlockPoolSliceScanner {
     updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
 
     // If the block does not exists anymore, then its not an error
-    if ( dataset.getFile(block.getBlockPoolId(), block.getLocalBlock()) == null ) {
-      LOG.info("Verification failed for " + block + ". Its ok since " +
-          "it not in datanode dataset anymore.");
+    if (!dataset.contains(block)) {
+      LOG.info(block + " is no longer in the dataset.");
       deleteBlock(block.getLocalBlock());
       return;
     }
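The scanner previously asked the dataset for the block's File only to compare it against null; dataset.contains(block) states the intent directly and keeps File handling inside the dataset. A hedged sketch of the caller-side shape, with the scanner plumbing stubbed out:

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;

    class VerificationStep {
      // A deleted block is not a verification failure; the hunk above
      // simply drops it from the scan log via deleteBlock().
      static boolean stillPresent(FSDatasetInterface dataset, ExtendedBlock block) {
        return dataset.contains(block);
      }
    }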
BlockSender.java
@@ -226,7 +226,7 @@ class BlockSender implements java.io.Closeable {
     // read and handle the common header here. For now just a version
     BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
     short version = header.getVersion();
-    if (version != FSDataset.METADATA_VERSION) {
+    if (version != BlockMetadataHeader.VERSION) {
       LOG.warn("Wrong version (" + version + ") for metadata file for "
           + block + " ignoring ...");
     }
FSDataset.java
@@ -470,7 +470,7 @@ public class FSDataset implements FSDatasetInterface {
       // read and handle the common header here. For now just a version
       BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       short version = header.getVersion();
-      if (version != FSDataset.METADATA_VERSION) {
+      if (version != BlockMetadataHeader.VERSION) {
         DataNode.LOG.warn("Wrong version (" + version + ") for metadata file "
             + metaFile + " ignoring ...");
       }

@@ -945,8 +945,7 @@ public class FSDataset implements FSDatasetInterface {
   //////////////////////////////////////////////////////
 
   //Find better place?
-  public static final String METADATA_EXTENSION = ".meta";
-  public static final short METADATA_VERSION = 1;
+  static final String METADATA_EXTENSION = ".meta";
   static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
 
   private static boolean isUnlinkTmpFile(File f) {

@@ -1031,15 +1030,10 @@ public class FSDataset implements FSDatasetInterface {
     }
   }
 
-  /** Return the block file for the given ID */
-  public File findBlockFile(String bpid, long blockId) {
-    return getFile(bpid, blockId);
-  }
-
   @Override // FSDatasetInterface
   public synchronized Block getStoredBlock(String bpid, long blkid)
       throws IOException {
-    File blockfile = findBlockFile(bpid, blkid);
+    File blockfile = getFile(bpid, blkid);
     if (blockfile == null) {
       return null;
     }

@@ -1259,8 +1253,7 @@ public class FSDataset implements FSDatasetInterface {
   /**
    * Get File name for a given block.
    */
-  public File getBlockFile(String bpid, Block b)
-      throws IOException {
+  File getBlockFile(String bpid, Block b) throws IOException {
     File f = validateBlockFile(bpid, b);
     if(f == null) {
       if (DataNode.LOG.isDebugEnabled()) {

@@ -1291,7 +1284,10 @@ public class FSDataset implements FSDatasetInterface {
    */
   private File getBlockFileNoExistsCheck(ExtendedBlock b)
       throws IOException {
-    File f = getFile(b.getBlockPoolId(), b.getLocalBlock());
+    final File f;
+    synchronized(this) {
+      f = getFile(b.getBlockPoolId(), b.getLocalBlock().getBlockId());
+    }
     if (f == null) {
       throw new IOException("Block " + b + " is not valid");
     }

@@ -2021,7 +2017,10 @@ public class FSDataset implements FSDatasetInterface {
    */
   File validateBlockFile(String bpid, Block b) {
     //Should we check for metadata file too?
-    File f = getFile(bpid, b);
+    final File f;
+    synchronized(this) {
+      f = getFile(bpid, b.getBlockId());
+    }
 
     if(f != null ) {
       if(f.exists())

@@ -2071,7 +2070,7 @@ public class FSDataset implements FSDatasetInterface {
       File f = null;
       FSVolume v;
       synchronized (this) {
-        f = getFile(bpid, invalidBlks[i]);
+        f = getFile(bpid, invalidBlks[i].getBlockId());
         ReplicaInfo dinfo = volumeMap.get(bpid, invalidBlks[i]);
         if (dinfo == null ||
             dinfo.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {

@@ -2130,11 +2129,10 @@ public class FSDataset implements FSDatasetInterface {
     datanode.notifyNamenodeDeletedBlock(block);
   }
 
-  /**
-   * Turn the block identifier into a filename; ignore generation stamp!!!
-   */
-  public synchronized File getFile(String bpid, Block b) {
-    return getFile(bpid, b.getBlockId());
+  @Override // {@link FSDatasetInterface}
+  public synchronized boolean contains(final ExtendedBlock block) {
+    final long blockId = block.getLocalBlock().getBlockId();
+    return getFile(block.getBlockPoolId(), blockId) != null;
   }
 
   /**

@@ -2143,7 +2141,7 @@ public class FSDataset implements FSDatasetInterface {
    * @param blockId a block's id
    * @return on disk data file path; null if the replica does not exist
    */
-  private File getFile(String bpid, long blockId) {
+  File getFile(final String bpid, final long blockId) {
     ReplicaInfo info = volumeMap.get(bpid, blockId);
     if (info != null) {
       return info.getBlockFile();
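Two things happen across the FSDataset hunks: getFile(bpid, blockId) becomes the single package-private lookup (findBlockFile and the Block-taking getFile overload are gone), and callers that are not already synchronized now take the dataset lock only around that one volumeMap read. A hedged sketch of that locking shape, with a plain map standing in for volumeMap:

    import java.io.File;
    import java.util.HashMap;
    import java.util.Map;

    class LockScope {
      // Stand-in for FSDataset.volumeMap; keyed by block id only.
      private final Map<Long, File> replicas = new HashMap<Long, File>();

      // Unsynchronized single lookup; callers choose the lock scope.
      File getFile(long blockId) {
        return replicas.get(blockId);
      }

      File validateBlockFile(long blockId) {
        final File f;
        synchronized (this) { // hold the lock only for the lookup
          f = getFile(blockId);
        }
        return (f != null && f.exists()) ? f : null;
      }

      synchronized boolean contains(long blockId) {
        return getFile(blockId) != null;
      }
    }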
FSDatasetInterface.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 
 import java.io.Closeable;
-import java.io.File;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;

@@ -27,13 +26,13 @@ import java.io.OutputStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;

@@ -303,6 +302,9 @@ public interface FSDatasetInterface extends FSDatasetMBean {
    */
   public BlockListAsLongs getBlockReport(String bpid);
 
+  /** Does the dataset contain the block? */
+  public boolean contains(ExtendedBlock block);
+
   /**
    * Is the block valid?
    * @param b
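Any FSDatasetInterface implementation now has to provide contains(); the commit adds it to FSDataset (a getFile null check, above) and to SimulatedFSDataset (a map lookup, below). A hedged sketch of what a third implementation might look like; the backing map here is illustrative, not from the commit:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    class PoolMapDataset {
      // block pool id -> (block id -> some replica record)
      private final Map<String, Map<Long, Object>> pools =
          new HashMap<String, Map<Long, Object>>();

      public synchronized boolean contains(ExtendedBlock block) {
        Map<Long, Object> pool = pools.get(block.getBlockPoolId());
        return pool != null
            && pool.containsKey(block.getLocalBlock().getBlockId());
      }
    }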
TestDFSShell.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;

@@ -1165,7 +1166,7 @@ public class TestDFSShell extends TestCase {
     for(int i = 0; i < blocks.length; i++) {
       FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
       for(Block b : blocks[i]) {
-        files.add(ds.getBlockFile(poolId, b));
+        files.add(DataNodeTestUtils.getBlockFile(ds, poolId, b.getBlockId()));
       }
     }
     return files;
TestFileAppend.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 

@@ -139,7 +140,8 @@ public class TestFileAppend{
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         ExtendedBlock b = blocks.get(i).getBlock();
-        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
+        final File f = DataNodeTestUtils.getBlockFile(dataset,
+            b.getBlockPoolId(), b.getLocalBlock().getBlockId());
         File link = new File(f.toString() + ".link");
         System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
TestFileCreation.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

@@ -831,7 +832,8 @@ public class TestFileCreation extends junit.framework.TestCase {
     FSDataset dataset = (FSDataset)datanode.data;
     ExtendedBlock blk = locatedblock.getBlock();
     Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
-    File blockfile = dataset.findBlockFile(blk.getBlockPoolId(), b.getBlockId());
+    final File blockfile = DataNodeTestUtils.getBlockFile(dataset,
+        blk.getBlockPoolId(), b.getBlockId());
     System.out.println("blockfile=" + blockfile);
     if (blockfile != null) {
       BufferedReader in = new BufferedReader(new FileReader(blockfile));
DataNodeTestUtils.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

@@ -38,4 +39,7 @@ public class DataNodeTestUtils {
     return dn.getDNRegistrationForBP(bpid);
   }
 
+  public static File getBlockFile(FSDataset fsdataset, String bpid, long bid) {
+    return fsdataset.getFile(bpid, bid);
+  }
 }
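Because DataNodeTestUtils sits in org.apache.hadoop.hdfs.server.datanode, it can forward to the now package-private FSDataset.getFile(bpid, blockId), giving tests in other packages (TestDFSShell, TestFileAppend, TestFileCreation above) a single test-only accessor instead of public production methods. A hedged usage sketch, assuming a running DataNode from some test fixture:

    import java.io.File;

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
    import org.apache.hadoop.hdfs.server.datanode.FSDataset;

    class BlockFileLookup {
      // Returns the replica's on-disk file, or null if the block
      // is no longer in the datanode's dataset.
      static File blockFileOf(DataNode dn, String bpid, long blockId) {
        FSDataset ds = (FSDataset) dn.getFSDataset();
        return DataNodeTestUtils.getBlockFile(ds, bpid, blockId);
      }
    }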
SimulatedFSDataset.java
@@ -81,8 +81,8 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
         CHECKSUM_NULL, 16*1024 );
     byte[] nullCrcHeader = checksum.getHeader();
     nullCrcFileData = new byte[2 + nullCrcHeader.length];
-    nullCrcFileData[0] = (byte) ((FSDataset.METADATA_VERSION >>> 8) & 0xff);
-    nullCrcFileData[1] = (byte) (FSDataset.METADATA_VERSION & 0xff);
+    nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff);
+    nullCrcFileData[1] = (byte) (BlockMetadataHeader.VERSION & 0xff);
     for (int i = 0; i < nullCrcHeader.length; i++) {
       nullCrcFileData[i+2] = nullCrcHeader[i];
     }

@@ -390,9 +390,7 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
       Iterable<Block> injectBlocks) throws IOException {
     ExtendedBlock blk = new ExtendedBlock();
     if (injectBlocks != null) {
-      int numInjectedBlocks = 0;
       for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
-        numInjectedBlocks++;
         if (b == null) {
           throw new NullPointerException("Null blocks in block list");
         }

@@ -555,31 +553,27 @@ public class SimulatedFSDataset implements FSDatasetInterface, Configurable{
     }
   }
 
+  private BInfo getBInfo(final ExtendedBlock b) {
+    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
+    return map == null? null: map.get(b.getLocalBlock());
+  }
+
+  @Override // {@link FSDatasetInterface}
+  public boolean contains(ExtendedBlock block) {
+    return getBInfo(block) != null;
+  }
+
   @Override // FSDatasetInterface
   public synchronized boolean isValidBlock(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && binfo.isFinalized();
   }
 
   /* check if a block is created but not finalized */
   @Override
   public synchronized boolean isValidRbw(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return !binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && !binfo.isFinalized();
   }
 
   @Override
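The getBInfo helper folds the duplicated two-level lookup (pool map, then block map) into one place, so isValidBlock, isValidRbw, and the new contains() each reduce to a line or two. The same consolidation in a hedged standalone form, with an illustrative Info record in place of BInfo:

    import java.util.HashMap;
    import java.util.Map;

    class TwoLevelLookup {
      static final class Info {
        final boolean finalized;
        Info(boolean finalized) { this.finalized = finalized; }
      }

      // pool id -> (block id -> info), shaped like SimulatedFSDataset.blockMap
      private final Map<String, Map<Long, Info>> pools =
          new HashMap<String, Map<Long, Info>>();

      // The extracted helper: the only place that knows the map shape.
      private Info getInfo(String poolId, long blockId) {
        Map<Long, Info> pool = pools.get(poolId);
        return pool == null ? null : pool.get(blockId);
      }

      boolean contains(String poolId, long blockId) {
        return getInfo(poolId, blockId) != null;
      }

      boolean isFinalized(String poolId, long blockId) {
        Info info = getInfo(poolId, blockId);
        return info != null && info.finalized;
      }
    }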
TestDirectoryScanner.java
@@ -352,7 +352,7 @@ public class TestDirectoryScanner extends TestCase {
 
     // Added block has the same file as the one created by the test
     File file = new File(getBlockFile(blockId));
-    assertEquals(file.getName(), fds.findBlockFile(bpid, blockId).getName());
+    assertEquals(file.getName(), fds.getFile(bpid, blockId).getName());
 
     // Generation stamp is same as that of created file
     assertEquals(genStamp, replicainfo.getGenerationStamp());
TestSimulatedFSDataset.java
@@ -101,7 +101,7 @@ public class TestSimulatedFSDataset extends TestCase {
     InputStream metaInput = fsdataset.getMetaDataInputStream(b);
     DataInputStream metaDataInput = new DataInputStream(metaInput);
     short version = metaDataInput.readShort();
-    assertEquals(FSDataset.METADATA_VERSION, version);
+    assertEquals(BlockMetadataHeader.VERSION, version);
     DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
     assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
     assertEquals(0, checksum.getChecksumSize());