From 3d6c58c0a2fa32bb1601490dd1788605fc18100a Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 10 Jan 2012 09:07:52 +0000
Subject: [PATCH] Merge r1188282 and r1188286 from trunk for HDFS-2489.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1229489 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../blockmanagement/DatanodeManager.java      |  5 +-
 .../hdfs/server/datanode/BPOfferService.java  |  3 +-
 .../server/namenode/NameNodeRpcServer.java    |  3 +-
 .../hdfs/server/protocol/DatanodeCommand.java | 54 +--------------
 .../hdfs/server/protocol/FinalizeCommand.java | 68 +++++++++++++++++++
 .../hdfs/server/protocol/RegisterCommand.java | 57 ++++++++++++++++
 .../server/namenode/TestDeadDatanode.java     |  3 +-
 8 files changed, 139 insertions(+), 57 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3f8f2e891e4..31013df8511 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -19,6 +19,9 @@ Release 0.23-PB - Unreleased
 
     HDFS-2480. Separate datatypes for NamenodeProtocol. (suresh)
 
+    HDFS-2489. Move Finalize and Register to separate file out of
+    DatanodeCommand.java. (suresh)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index e11355aed9b..5d795e74455 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -862,7 +863,7 @@ public class DatanodeManager {
       try {
         nodeinfo = getDatanode(nodeReg);
       } catch(UnregisteredNodeException e) {
-        return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+        return new DatanodeCommand[]{RegisterCommand.REGISTER};
       }
 
       // Check if this datanode should actually be shutdown instead.
@@ -872,7 +873,7 @@ public class DatanodeManager {
       }
 
       if (nodeinfo == null || !nodeinfo.isAlive) {
-        return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+        return new DatanodeCommand[]{RegisterCommand.REGISTER};
       }
 
       heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed,

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 783bd34d858..a58de18f04c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
@@ -699,7 +700,7 @@ class BPOfferService implements Runnable {
       }
       break;
     case DatanodeProtocol.DNA_FINALIZE:
-      String bp = ((DatanodeCommand.Finalize) cmd).getBlockPoolId();
+      String bp = ((FinalizeCommand) cmd).getBlockPoolId();
       assert getBlockPoolId().equals(bp) :
         "BP " + getBlockPoolId() + " received DNA_FINALIZE "
         + "for other block pool " + bp;

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index c8e096c4fcd..581bced4d00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -799,7 +800,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
 
     if (nn.getFSImage().isUpgradeFinalized())
-      return new DatanodeCommand.Finalize(poolId);
+      return new FinalizeCommand(poolId);
     return null;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
index 52396d2408f..9c6950f2174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
@@ -17,17 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
+import org.apache.avro.reflect.Union;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.avro.reflect.Union;
 
 /**
  * Base class for data-node command.
@@ -36,55 +28,13 @@ import org.apache.avro.reflect.Union;
 // Declare subclasses for Avro's denormalized representation
 @Union({Void.class,
-    DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+    RegisterCommand.class, FinalizeCommand.class,
     BlockCommand.class, UpgradeCommand.class,
     BlockRecoveryCommand.class, KeyUpdateCommand.class})
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  static class Register extends DatanodeCommand {
-    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  public static class Finalize extends DatanodeCommand {
-    String blockPoolId;
-    private Finalize() {
-      super(DatanodeProtocol.DNA_FINALIZE);
-    }
-
-    public Finalize(String bpid) {
-      super(DatanodeProtocol.DNA_FINALIZE);
-      blockPoolId = bpid;
-    }
-
-    public String getBlockPoolId() {
-      return blockPoolId;
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      blockPoolId = WritableUtils.readString(in);
-    }
-    public void write(DataOutput out) throws IOException {
-      WritableUtils.writeString(out, blockPoolId);
-    }
-  }
-
-  static { // register a ctor
-    WritableFactories.setFactory(Register.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Register();}
-        });
-    WritableFactories.setFactory(Finalize.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Finalize();}
-        });
-  }
-
-  public static final DatanodeCommand REGISTER = new Register();
-
   public DatanodeCommand() {
     super();
   }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
new file mode 100644
index 00000000000..3bc8b117c2c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+
+/**
+ * A FinalizeCommand is an instruction from the namenode to a datanode to finalize the upgrade of the given block pool.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FinalizeCommand extends DatanodeCommand {
+  // /////////////////////////////////////////
+  // Writable
+  // /////////////////////////////////////////
+  static { // register a ctor
+    WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
+      public Writable newInstance() {
+        return new FinalizeCommand();
+      }
+    });
+  }
+
+  String blockPoolId;
+  private FinalizeCommand() {
+    super(DatanodeProtocol.DNA_FINALIZE);
+  }
+
+  public FinalizeCommand(String bpid) {
+    super(DatanodeProtocol.DNA_FINALIZE);
+    blockPoolId = bpid;
+  }
+
+  public String getBlockPoolId() {
+    return blockPoolId;
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    blockPoolId = WritableUtils.readString(in);
+  }
+  public void write(DataOutput out) throws IOException {
+    WritableUtils.writeString(out, blockPoolId);
+  }
+}
\ No newline at end of file

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
new file mode 100644
index 00000000000..05843475f65
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * A RegisterCommand is an instruction to a datanode to register with the namenode.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegisterCommand extends DatanodeCommand {
+  // /////////////////////////////////////////
+  // Writable
+  // /////////////////////////////////////////
+  static { // register a ctor
+    WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
+      public Writable newInstance() {
+        return new RegisterCommand();
+      }
+    });
+  }
+
+  public static final DatanodeCommand REGISTER = new RegisterCommand();
+
+  public RegisterCommand() {
+    super(DatanodeProtocol.DNA_REGISTER);
+  }
+
+  @Override
+  public void readFields(DataInput in) { }
+
+  @Override
+  public void write(DataOutput out) { }
+}
\ No newline at end of file

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 021d60a5505..bfc02360d48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.junit.After;
 import org.junit.Test;
 
@@ -128,7 +129,7 @@ public class TestDeadDatanode {
     // that asks datanode to register again
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
+    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
 }
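
Both new files carry over the "static { WritableFactories.setFactory(...) }" registration that previously sat inside DatanodeCommand.java. The Writable-based RPC path materializes a command from its class name and then calls readFields() on the empty instance, so each concrete command class registers a factory that can produce one; this is also what lets FinalizeCommand keep its no-argument constructor private. The following is a minimal round-trip sketch of that mechanism, not part of the patch: it assumes only the two classes added above plus hadoop-common's DataOutputBuffer and DataInputBuffer, and the class name FinalizeCommandRoundTrip and block pool id "BP-example" are made up for illustration.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;

public class FinalizeCommandRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize: FinalizeCommand.write() emits only the block pool id;
    // the action code is implied by the concrete class, which the RPC
    // layer records separately. ("BP-example" is a made-up value.)
    FinalizeCommand sent = new FinalizeCommand("BP-example");
    DataOutputBuffer out = new DataOutputBuffer();
    sent.write(out);

    // Deserialize the way the Writable RPC path does: obtain an empty
    // instance through the factory registered in FinalizeCommand's
    // static block (hence the private no-arg constructor suffices),
    // then let readFields() populate it.
    Writable received = WritableFactories.newInstance(FinalizeCommand.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    received.readFields(in);

    FinalizeCommand cmd = (FinalizeCommand) received;
    assert cmd.getAction() == DatanodeProtocol.DNA_FINALIZE;
    System.out.println(cmd.getBlockPoolId()); // prints BP-example
  }
}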