From b95294416c41b24be88e59740a5f2023129c8931 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Thu, 20 Feb 2014 01:43:12 +0000
Subject: [PATCH] HDFS-5483. NN should gracefully handle multiple block
 replicas on same DN. (Arpit Agarwal)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1570040 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 +
 .../server/blockmanagement/BlockManager.java  |   3 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java | 137 ++++++++++++++++++
 3 files changed, 142 insertions(+), 1 deletion(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d3a7edb1eb9..bc21c5feb81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -529,6 +529,9 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-5961. OIV cannot load fsimages containing a symbolic link. (kihwal)
 
+    HDFS-5483. NN should gracefully handle multiple block replicas on same DN.
+    (Arpit Agarwal)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dfa2047190c..382c1a11d06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1869,7 +1869,8 @@ private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage,
           iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);
 
       // move block to the head of the list
-      if (storedBlock != null && (curIndex = storedBlock.findDatanode(dn)) >= 0) {
+      if (storedBlock != null &&
+          (curIndex = storedBlock.findStorageInfo(storageInfo)) >= 0) {
         headIndex = storageInfo.moveBlockToHead(storedBlock, curIndex, headIndex);
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
new file mode 100644
index 00000000000..0b28d554f11
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.*;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+/**
+ * This test verifies NameNode behavior when it gets unexpected block reports
+ * from DataNodes. The same block is reported by two different storages on
+ * the same DataNode. Excess replicas on the same DN should be ignored by
+ * the NN.
+ */
+public class TestBlockHasMultipleReplicasOnSameDN {
+  public static final Log LOG =
+      LogFactory.getLog(TestBlockHasMultipleReplicasOnSameDN.class);
+
+  private static final short NUM_DATANODES = 2;
+  private static final int BLOCK_SIZE = 1024;
+  private static final long NUM_BLOCKS = 5;
+  private static final long seed = 0x1BADF00DL;
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private DFSClient client;
+  private String bpid;
+
+  @Before
+  public void startUpCluster() throws IOException {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_DATANODES)
+        .build();
+    fs = cluster.getFileSystem();
+    client = fs.getClient();
+    bpid = cluster.getNamesystem().getBlockPoolId();
+  }
+
+  @After
+  public void shutDownCluster() throws IOException {
+    if (cluster != null) {
+      fs.close();
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  private String makeFileName(String prefix) {
+    return "/" + prefix + ".dat";
+  }
+
+  /**
+   * Verify NameNode behavior when a given DN reports multiple replicas
+   * of a given block.
+   */
+  @Test
+  public void testBlockHasMultipleReplicasOnSameDN() throws IOException {
+    String filename = makeFileName(GenericTestUtils.getMethodName());
+    Path filePath = new Path(filename);
+
+    // Write out a file with a few blocks.
+    DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS,
+        BLOCK_SIZE, NUM_DATANODES, seed);
+
+    // Get the block list for the file with the block locations.
+    LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+        filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
+
+    // Generate a fake block report from one of the DataNodes, such
+    // that it reports one copy of each block on either storage.
+    DataNode dn = cluster.getDataNodes().get(0);
+    DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
+    StorageBlockReport reports[] =
+        new StorageBlockReport[MiniDFSCluster.DIRS_PER_DATANODE];
+
+    ArrayList<Block> blocks = new ArrayList<Block>();
+
+    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
+      blocks.add(locatedBlock.getBlock().getLocalBlock());
+    }
+
+    for (int i = 0; i < MiniDFSCluster.DIRS_PER_DATANODE; ++i) {
+      BlockListAsLongs bll = new BlockListAsLongs(blocks, null);
+      FsVolumeSpi v = dn.getFSDataset().getVolumes().get(i);
+      DatanodeStorage dns = new DatanodeStorage(v.getStorageID());
+      reports[i] = new StorageBlockReport(dns, bll.getBlockListAsLongs());
+    }
+
+    // Should not assert!
+    cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports);
+
+    // Get the block locations once again.
+    locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS);
+
+    // Make sure that each block has two replicas, one on each DataNode.
+    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
+      DatanodeInfo[] locations = locatedBlock.getLocations();
+      assertThat(locations.length, is((int) NUM_DATANODES));
+      assertThat(locations[0].getDatanodeUuid(),
+                 not(locations[1].getDatanodeUuid()));
+    }
+  }
+}
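
Note on the BlockManager change above: reportDiff() moves each reported replica to the head of the reporting storage's block list, so it needs the index of the replica belonging to that exact storage. The old lookup, storedBlock.findDatanode(dn), keyed the search on the DataNode and therefore always resolved to the first replica on that node; once one node legitimately holds the block on two storages, a report from the second storage can hand moveBlockToHead() the wrong index. storedBlock.findStorageInfo(storageInfo) keys the search on the storage instead. The standalone sketch below illustrates that distinction; ToyStorage and ToyStoredBlock are invented stand-ins for illustration only, not Hadoop's actual DatanodeStorageInfo/BlockInfo classes.

import java.util.ArrayList;
import java.util.List;

// Toy model of per-block replica lookup. Simplified: real BlockInfo stores
// replicas in a triplets array shared across per-storage linked lists.
class ToyStorage {
  final String datanodeUuid; // the DataNode hosting this storage (disk)
  final String storageId;    // unique per storage, even on the same node

  ToyStorage(String datanodeUuid, String storageId) {
    this.datanodeUuid = datanodeUuid;
    this.storageId = storageId;
  }
}

class ToyStoredBlock {
  final List<ToyStorage> replicas = new ArrayList<ToyStorage>();

  // Pre-fix style lookup, keyed on the DataNode: returns the *first* replica
  // on that node, which is ambiguous when two of its storages hold the block.
  int findDatanode(String datanodeUuid) {
    for (int i = 0; i < replicas.size(); i++) {
      if (replicas.get(i).datanodeUuid.equals(datanodeUuid)) {
        return i;
      }
    }
    return -1;
  }

  // Post-fix style lookup, keyed on the exact storage: each replica on a
  // multi-storage node resolves unambiguously.
  int findStorageInfo(ToyStorage storage) {
    for (int i = 0; i < replicas.size(); i++) {
      if (replicas.get(i).storageId.equals(storage.storageId)) {
        return i;
      }
    }
    return -1;
  }
}

public class ReplicaLookupSketch {
  public static void main(String[] args) {
    ToyStorage diskA = new ToyStorage("dn-1", "storage-A");
    ToyStorage diskB = new ToyStorage("dn-1", "storage-B"); // same node, 2nd disk

    ToyStoredBlock block = new ToyStoredBlock();
    block.replicas.add(diskA);
    block.replicas.add(diskB);

    // Processing a block report from storage-B: the node-keyed lookup finds
    // the storage-A replica instead of the one actually being reported.
    System.out.println(block.findDatanode("dn-1"));   // prints 0 (storage-A)
    System.out.println(block.findStorageInfo(diskB)); // prints 1 (storage-B)
  }
}

This is also what the new test exercises end to end: it sends one block report per storage directory of a single DataNode, each claiming the same blocks, and checks that the NameNode neither asserts nor double-counts the node when returning block locations.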