diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 554d1131345..38685adb987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocol;
 import java.util.Arrays;
 import java.util.List;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
@@ -184,6 +185,9 @@ public class LocatedBlock {
     }
     // Not present in loc, add it and go
     cachedList.add(loc);
+    Preconditions.checkArgument(cachedLocs != EMPTY_LOCS,
+        "Cached locations should only be added when having a backing"
+            + " disk replica!", loc, locs.length, Arrays.toString(locs));
     cachedLocs = cachedList.toArray(cachedLocs);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 41bf47c4dce..465b77ce4cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1537,6 +1537,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9205. Do not schedule corrupt blocks for replication. (szetszwo)
 
+    HDFS-9250. Add Precondition check to LocatedBlock#addCachedLoc.
+    (Xiao Chen via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
new file mode 100644
index 00000000000..e349da22947
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.junit.Test;
+
+import static org.junit.Assert.fail;
+
+public class TestLocatedBlock {
+  public static final Log LOG = LogFactory.getLog(TestLocatedBlock.class);
+
+  @Test(timeout = 10000)
+  public void testAddCachedLocWhenEmpty() {
+    DatanodeInfo[] ds = new DatanodeInfo[0];
+    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
+    LocatedBlock l1 = new LocatedBlock(b1, ds);
+    DatanodeDescriptor dn = new DatanodeDescriptor(
+        new DatanodeID("127.0.0.1", "localhost", "abcd",
+            5000, 5001, 5002, 5003));
+    try {
+      l1.addCachedLoc(dn);
+      fail("Adding dn when block is empty should throw");
+    } catch (IllegalArgumentException e) {
+      LOG.info("Expected exception:", e);
+    }
+  }
+}
\ No newline at end of file
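
For context, a companion sketch of the positive path the new check still permits (hypothetical, not part of the patch above): a cached location that equals one of the block's disk replicas is accepted through the early-return path in addCachedLoc, so only the no-backing-replica case trips the new IllegalArgumentException. The fixture names below (b2, dn2, l2) are illustrative, mirroring the test's style:

    // Hypothetical positive-path sketch, not part of the patch. The added
    // location equals a disk replica already in locs, so addCachedLoc takes
    // the early-return branch and the precondition is never reached.
    ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
    DatanodeDescriptor dn2 = new DatanodeDescriptor(
        new DatanodeID("127.0.0.1", "localhost", "abcd",
            5000, 5001, 5002, 5003));
    LocatedBlock l2 = new LocatedBlock(b2, new DatanodeInfo[] { dn2 });
    l2.addCachedLoc(dn2); // backed by a disk replica; should not throw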