diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 48d806c6fb5..2ac39f07717 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -119,6 +119,9 @@ Release 0.23-PB - Unreleased
     HDFS-2768. BackupNode stop can not close proxy connections because
     it is not a proxy instance. (Uma Maheswara Rao G via eli)
 
+    HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
+    multiple blocks need recovery. (todd)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b30a077ee68..fab9f1f1c93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -767,8 +767,9 @@ public class PBHelper {
     List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
     List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
         list.size());
-    for (int i = 0; i < list.size(); i++) {
-      recoveringBlocks.add(PBHelper.convert(list.get(0)));
+
+    for (RecoveringBlockProto rbp : list) {
+      recoveringBlocks.add(PBHelper.convert(rbp));
     }
     return new BlockRecoveryCommand(recoveringBlocks);
   }
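The functional fix is the loop above: the pre-patch code indexed list.get(0) on every iteration, so a BlockRecoveryCommand carrying several blocks was decoded as N copies of its first block, and the blocks after the first were never recovered. A minimal, self-contained sketch of the before/after behavior, using List<String> as a hypothetical stand-in for List<RecoveringBlockProto>:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ConvertLoopSketch {
      public static void main(String[] args) {
        List<String> protos = Arrays.asList("blk_1", "blk_2", "blk_3");

        // Pre-patch shape: the counter i is declared but never used, so
        // element 0 is "converted" list.size() times.
        List<String> buggy = new ArrayList<String>(protos.size());
        for (int i = 0; i < protos.size(); i++) {
          buggy.add(protos.get(0));
        }

        // Post-patch shape: a for-each over the list, one conversion per
        // element, matching the new PBHelper loop.
        List<String> fixed = new ArrayList<String>(protos.size());
        for (String rbp : protos) {
          fixed.add(rbp);
        }

        System.out.println(buggy); // [blk_1, blk_1, blk_1]
        System.out.println(fixed); // [blk_1, blk_2, blk_3]
      }
    }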
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
index 0c2e55e6933..5f2ae8eb8d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
 
+import com.google.common.base.Joiner;
+
 /**
  * BlockRecoveryCommand is an instruction to a data-node to recover
  * the specified blocks.
@@ -138,6 +140,15 @@ public class BlockRecoveryCommand extends DatanodeCommand {
   public void add(RecoveringBlock block) {
     recoveringBlocks.add(block);
   }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("BlockRecoveryCommand(\n  ");
+    Joiner.on("\n  ").appendTo(sb, recoveringBlocks);
+    sb.append("\n)");
+    return sb.toString();
+  }
 
   ///////////////////////////////////////////
   // Writable
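The new toString() uses Guava's Joiner to render one RecoveringBlock per indented line, which is what the test below compares. A self-contained sketch of the same pattern, with strings standing in for RecoveringBlock entries (Joiner.on(...).appendTo(StringBuilder, Iterable) is the actual Guava API; the rest is illustrative):

    import java.util.Arrays;
    import java.util.List;

    import com.google.common.base.Joiner;

    public class JoinerToStringSketch {
      public static void main(String[] args) {
        List<String> blocks = Arrays.asList("blk_1", "blk_2");

        // Same shape as BlockRecoveryCommand#toString(): open the wrapper,
        // join elements with a newline-plus-indent separator, close it.
        StringBuilder sb = new StringBuilder();
        sb.append("BlockRecoveryCommand(\n  ");
        Joiner.on("\n  ").appendTo(sb, blocks);
        sb.append("\n)");
        System.out.println(sb);
        // Prints:
        // BlockRecoveryCommand(
        //   blk_1
        //   blk_2
        // )
      }
    }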
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index ee873a9d4f3..fd31df00787 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -68,6 +70,10 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Test;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
 /**
  * Tests for {@link PBHelper}
  */
@@ -265,9 +271,12 @@ public class TestPBHelper {
       compare(logs.get(i), logs1.get(i));
     }
   }
-
   public ExtendedBlock getExtendedBlock() {
-    return new ExtendedBlock("bpid", 1, 100, 2);
+    return getExtendedBlock(1);
+  }
+
+  public ExtendedBlock getExtendedBlock(long blkid) {
+    return new ExtendedBlock("bpid", blkid, 100, 2);
   }
 
   public DatanodeInfo getDNInfo() {
@@ -318,6 +327,31 @@ public class TestPBHelper {
     }
   }
 
+  @Test
+  public void testConvertBlockRecoveryCommand() {
+    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+
+    List<RecoveringBlock> blks = ImmutableList.of(
+      new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
+      new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
+    );
+
+    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
+    BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
+    assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
+    assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
+
+    BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
+
+    List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
+        cmd2.getRecoveringBlocks());
+    assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
+    assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
+    assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
+    assertEquals(cmd.toString(), cmd2.toString());
+  }
+
+
   @Test
   public void testConvertText() {
     Text t = new Text("abc".getBytes());
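Two details make this regression test effective. First, the fixture deliberately uses two RecoveringBlocks with different block ids; a single-block command round-trips correctly even through the broken get(0) loop, so only a multi-block fixture can expose the bug. Second, the list-level comparison goes through Joiner.on(",").join(...) and toString() rather than assertEquals on the lists themselves, presumably because RecoveringBlock does not override equals(); the per-element assertions instead compare the underlying ExtendedBlocks, which do. A small sketch of that string-comparison fallback, where NoEquals is a hypothetical stand-in for such a type:

    import java.util.Arrays;
    import java.util.List;

    import com.google.common.base.Joiner;

    public class StringCompareSketch {
      // A type with a useful toString() but no equals()/hashCode(),
      // so list equality degenerates to reference equality.
      static class NoEquals {
        final long id;
        NoEquals(long id) { this.id = id; }
        @Override public String toString() { return "blk_" + id; }
      }

      public static void main(String[] args) {
        List<NoEquals> a = Arrays.asList(new NoEquals(1), new NoEquals(2));
        List<NoEquals> b = Arrays.asList(new NoEquals(1), new NoEquals(2));

        System.out.println(a.equals(b));       // false: distinct references
        System.out.println(Joiner.on(",").join(a)
            .equals(Joiner.on(",").join(b)));  // true: compares renderings
      }
    }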