From 75f67416fb15a65dd7dfd265d3ae80d87374a0a0 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 13 Mar 2012 22:53:35 +0000
Subject: [PATCH] svn merge -c 1300392 from trunk for HDFS-3082.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1300393 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../hdfs/server/datanode/BPOfferService.java  | 18 +------
 .../hdfs/server/datanode/BlockSender.java     |  7 +--
 .../hadoop/hdfs/server/datanode/DataNode.java |  2 +-
 .../hdfs/server/datanode/FSDataset.java       | 31 ++---
 .../server/datanode/FSDatasetInterface.java   | 43 +++-------
 .../apache/hadoop/hdfs/MiniDFSCluster.java    |  2 +-
 .../apache/hadoop/hdfs/TestFileAppend3.java   |  4 +-
 .../apache/hadoop/hdfs/TestFileCreation.java  |  3 +-
 .../apache/hadoop/hdfs/TestLeaseRecovery.java |  5 +-
 .../hdfs/TestShortCircuitLocalRead.java       |  4 +-
 .../server/datanode/SimulatedFSDataset.java   | 52 +++---------
 .../datanode/TestSimulatedFSDataset.java      |  2 +-
 13 files changed, 40 insertions(+), 136 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 297e9fef438..2dd2dc2ed83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -135,6 +135,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-2731. Add command to bootstrap the Standby Node's name directories
     from the Active NameNode. (todd)
 
+    HDFS-3082. Clean up FSDatasetInterface and change DataNode.data to package
+    private. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)
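
The functional core of the change is easiest to read outside diff context: DataNode.data loses its public modifier, and callers go through the existing DataNode.getFSDataset() accessor instead, as the BPOfferService hunk below does. A minimal sketch of the resulting shape, with both types trimmed to the members this patch touches; the stub interface body is elided and is not part of the patch:

    // Sketch only: package-private field, public accessor.
    interface FSDatasetInterface { /* elided; see FSDatasetInterface.java */ }

    public class DataNode {
      // was: public volatile FSDatasetInterface data = null;
      volatile FSDatasetInterface data = null;

      // Code outside the datanode package reads the dataset through this.
      public FSDatasetInterface getFSDataset() {
        return data;
      }
    }
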
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index d7227acdf7e..52a6eb7bb62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -29,21 +29,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
@@ -51,16 +43,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
@@ -579,7 +563,7 @@ class BPOfferService {
         dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete);
       }
       // using global fsdataset
-      dn.data.invalidate(bcmd.getBlockPoolId(), toDelete);
+      dn.getFSDataset().invalidate(bcmd.getBlockPoolId(), toDelete);
     } catch(IOException e) {
       dn.checkDiskError();
       throw e;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 1aeb7e1f41b..d77c28ca075 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -219,9 +219,10 @@ class BlockSender implements java.io.Closeable {
           (!is32Bit || length <= Integer.MAX_VALUE);
 
       DataChecksum csum;
-      if (!corruptChecksumOk || datanode.data.metaFileExists(block)) {
-        checksumIn = new DataInputStream(new BufferedInputStream(datanode.data
-            .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE));
+      final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
+      if (!corruptChecksumOk || metaIn != null) {
+        checksumIn = new DataInputStream(
+            new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
 
         // read and handle the common header here. For now just a version
         BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
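
The BlockSender hunk above is the caller-side half of the cleanup: a single getMetaDataInputStream() call replaces the old metaFileExists() probe plus a second lookup, and a null return now means "no meta file", which also closes the window in which the file could vanish between the existence check and the open. A minimal caller sketch under that contract, assuming the sketch lives in the datanode package; openChecksumStream is a hypothetical helper, not code from this patch:

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.BufferedInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    class MetaStreamCaller {
      // One lookup, one null check: no exists()/open race.
      static DataInputStream openChecksumStream(FSDatasetInterface data,
          ExtendedBlock block, int bufferSize) throws IOException {
        final InputStream metaIn = data.getMetaDataInputStream(block);
        if (metaIn == null) {
          return null; // the block has no meta file
        }
        return new DataInputStream(new BufferedInputStream(metaIn, bufferSize));
      }
    }
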
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f9eaf9be201..59b2882b588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -235,7 +235,7 @@ public class DataNode extends Configured
   volatile boolean shouldRun = true;
   private BlockPoolManager blockPoolManager;
-  public volatile FSDatasetInterface data = null;
+  volatile FSDatasetInterface data = null;
   private String clusterId = null;
 
   public final static String EMPTY_DEL_HINT = "";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 0cf0d0f6e78..da46a0534b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -1036,23 +1036,14 @@ class FSDataset implements FSDatasetInterface {
     return null;
   }
 
-  @Override // FSDatasetInterface
-  public boolean metaFileExists(ExtendedBlock b) throws IOException {
-    return getMetaFile(b).exists();
-  }
-
-  @Override // FSDatasetInterface
-  public long getMetaDataLength(ExtendedBlock b) throws IOException {
-    File checksumFile = getMetaFile(b);
-    return checksumFile.length();
-  }
-
   @Override // FSDatasetInterface
   public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
       throws IOException {
-    File checksumFile = getMetaFile(b);
-    return new MetaDataInputStream(new FileInputStream(checksumFile),
-        checksumFile.length());
+    final File meta = getMetaFile(b);
+    if (meta == null || !meta.exists()) {
+      return null;
+    }
+    return new MetaDataInputStream(new FileInputStream(meta), meta.length());
   }
 
   private final DataNode datanode;
@@ -1213,18 +1204,6 @@ class FSDataset implements FSDatasetInterface {
     return f;
   }
 
-  @Override // FSDatasetInterface
-  public InputStream getBlockInputStream(ExtendedBlock b)
-      throws IOException {
-    File f = getBlockFileNoExistsCheck(b);
-    try {
-      return new FileInputStream(f);
-    } catch (FileNotFoundException fnfe) {
-      throw new IOException("Block " + b + " is not valid. " +
-          "Expected block file at " + f + " does not exist.");
-    }
-  }
-
   /**
    * Return the File associated with a block, without first
    * checking that it exists. This should be used when the
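
With getMetaDataLength() removed here (and from the interface and SimulatedFSDataset below), the length now travels with the stream: MetaDataInputStream is constructed with it, as the SimulatedFSDataset hunk's new MetaDataInputStream(sin, sin.getLength()) shows. A sketch of recovering the length under the new API; metaDataLength is a hypothetical helper, and it assumes MetaDataInputStream exposes getLength() to code in the datanode package:

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    class MetaLengthSketch {
      // Open, read the length carried by the wrapper, close.
      // Returns -1 where callers previously probed metaFileExists() first.
      static long metaDataLength(FSDatasetInterface data, ExtendedBlock block)
          throws IOException {
        final FSDatasetInterface.MetaDataInputStream in =
            data.getMetaDataInputStream(block);
        if (in == null) {
          return -1; // no meta file for this block
        }
        try {
          return in.getLength();
        } finally {
          in.close();
        }
      }
    }
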
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
index 26b2e13c613..029c2707281 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlo
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * This is an interface for the underlying storage that stores blocks for
@@ -123,14 +123,6 @@ public interface FSDatasetInterface
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {
@@ -712,40 +705,11 @@
       throw new IOException("Block " + b +
           " is being written, its meta cannot be read");
     }
-    return binfo.getMetaIStream();
-  }
-
-  @Override // FSDatasetInterface
-  public synchronized long getMetaDataLength(ExtendedBlock b)
-      throws IOException {
-    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      throw new IOException("No such Block " + b );
-    }
-    if (!binfo.finalized) {
-      throw new IOException("Block " + b +
-          " is being written, its metalength cannot be read");
-    }
-    return binfo.getMetaIStream().getLength();
-  }
-
-  @Override // FSDatasetInterface
-  public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
-      throws IOException {
-    return new MetaDataInputStream(getMetaDataInStream(b),
-        getMetaDataLength(b));
-  }
-
-  @Override // FSDatasetInterface
-  public synchronized boolean metaFileExists(ExtendedBlock b) throws IOException {
-    if (!isValidBlock(b)) {
-      throw new IOException("Block " + b +
-          " is valid, and cannot be written to.");
-    }
-    return true; // crc exists for all valid blocks
+    final SimulatedInputStream sin = binfo.getMetaIStream();
+    return new MetaDataInputStream(sin, sin.getLength());
   }
 
+  @Override
   public void checkDataDir() throws DiskErrorException {
     // nothing to check for simulated data set
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index 752419fe288..40d55e644ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -102,7 +102,7 @@ public class TestSimulatedFSDataset extends TestCase {
     final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
     ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
     try {
-      assertFalse(fsdataset.metaFileExists(b));
+      assertTrue(fsdataset.getMetaDataInputStream(b) == null);
       assertTrue("Expected an IO exception", false);
     } catch (IOException e) {
       // ok - as expected
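
One behavioral wrinkle the rewritten test pins down: a null from FSDataset.getMetaDataInputStream() means "no meta file", but SimulatedFSDataset still throws IOException for a block it has never seen, so the new assertTrue(... == null) line sits inside a try block whose catch clause is the expected exit. A standalone JUnit 3 sketch of that expectation; the helper name and the bpid parameter are placeholders (the real test builds its dataset through a private getSimulatedFSDataset() method):

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.IOException;

    import junit.framework.TestCase;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class MetaDataContractSketch extends TestCase {
      // For an unknown block, the simulated dataset throws rather than
      // returning null, so the fail() line must never be reached.
      static void expectNoSuchBlock(SimulatedFSDataset fsdataset, String bpid)
          throws Exception {
        final ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
        try {
          fsdataset.getMetaDataInputStream(b);
          fail("Expected an IOException for an unknown block");
        } catch (IOException e) {
          // expected: block b was never added to the dataset
        }
      }
    }
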