Merge r1188282 and r1188286 from trunk for HDFS-2489.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1229489 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-01-10 09:07:52 +00:00
parent 429be99307
commit 3d6c58c0a2
8 changed files with 139 additions and 57 deletions
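In short, HDFS-2489 promotes the Register and Finalize nested classes of DatanodeCommand to the top-level RegisterCommand and FinalizeCommand classes, and the hunks below update every caller to the new names. A minimal sketch of the caller-side migration (the helper class and method names here are illustrative, not part of HDFS):

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;

class CallerMigrationSketch {
  // Before this change: new DatanodeCommand[] { DatanodeCommand.REGISTER }
  static DatanodeCommand[] askDatanodeToReRegister() {
    return new DatanodeCommand[] { RegisterCommand.REGISTER };
  }

  // Before this change: new DatanodeCommand.Finalize(poolId)
  static DatanodeCommand askDatanodeToFinalize(String poolId) {
    return new FinalizeCommand(poolId);
  }
}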


@@ -19,6 +19,9 @@ Release 0.23-PB - Unreleased
     HDFS-2480. Separate datatypes for NamenodeProtocol. (suresh)
 
+    HDFS-2489. Move Finalize and Register to separate file out of
+    DatanodeCommand.java. (suresh)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -862,7 +863,7 @@ public class DatanodeManager {
     try {
       nodeinfo = getDatanode(nodeReg);
     } catch(UnregisteredNodeException e) {
-      return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
     }
 
     // Check if this datanode should actually be shutdown instead.
@@ -872,7 +873,7 @@ public class DatanodeManager {
     }
 
     if (nodeinfo == null || !nodeinfo.isAlive) {
-      return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
     }
 
     heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed,
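The two heartbeat hunks above swap DatanodeCommand.REGISTER for RegisterCommand.REGISTER, but the action code the command carries (DatanodeProtocol.DNA_REGISTER) is unchanged, so receivers that dispatch on getAction() are unaffected. A small sketch of that receiver-side check, assuming only the public getAction() accessor (the class and method names are made up for illustration):

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

class HeartbeatReplySketch {
  // Returns true if any command in the heartbeat reply asks the datanode
  // to register again with the namenode.
  static boolean mustReRegister(DatanodeCommand[] cmds) {
    if (cmds == null) {
      return false;
    }
    for (DatanodeCommand cmd : cmds) {
      if (cmd != null && cmd.getAction() == DatanodeProtocol.DNA_REGISTER) {
        return true;
      }
    }
    return false;
  }
}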


@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
@@ -699,7 +700,7 @@ class BPOfferService implements Runnable {
       }
       break;
     case DatanodeProtocol.DNA_FINALIZE:
-      String bp = ((DatanodeCommand.Finalize) cmd).getBlockPoolId();
+      String bp = ((FinalizeCommand) cmd).getBlockPoolId();
       assert getBlockPoolId().equals(bp) :
         "BP " + getBlockPoolId() + " received DNA_FINALIZE " +
         "for other block pool " + bp;


@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -799,7 +800,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
 
     if (nn.getFSImage().isUpgradeFinalized())
-      return new DatanodeCommand.Finalize(poolId);
+      return new FinalizeCommand(poolId);
     return null;
   }
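On the datanode side of this RPC, the block-report reply is either null or a command to act on; after this change the finalize case arrives as the top-level FinalizeCommand. A hedged sketch of how a DatanodeProtocol client might consume that reply (handleBlockReportReply and finalizeBlockPool are illustrative names, not real HDFS methods):

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;

class BlockReportReplySketch {
  // cmd is the DatanodeCommand returned by the blockReport RPC; it may be
  // null when the namenode has nothing to ask of the datanode.
  static void handleBlockReportReply(DatanodeCommand cmd) {
    if (cmd instanceof FinalizeCommand) {
      String bpid = ((FinalizeCommand) cmd).getBlockPoolId();
      finalizeBlockPool(bpid);
    }
  }

  // Illustrative stub: the real datanode finalizes the previous upgrade
  // for the given block pool here.
  static void finalizeBlockPool(String blockPoolId) {
  }
}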


@@ -17,17 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
+import org.apache.avro.reflect.Union;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableUtils;
-
-import org.apache.avro.reflect.Union;
 
 /**
  * Base class for data-node command.
@@ -36,55 +28,13 @@ import org.apache.avro.reflect.Union;
 // Declare subclasses for Avro's denormalized representation
 @Union({Void.class,
-      DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+      RegisterCommand.class, FinalizeCommand.class,
       BlockCommand.class, UpgradeCommand.class,
       BlockRecoveryCommand.class, KeyUpdateCommand.class})
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  static class Register extends DatanodeCommand {
-    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  public static class Finalize extends DatanodeCommand {
-    String blockPoolId;
-    private Finalize() {
-      super(DatanodeProtocol.DNA_FINALIZE);
-    }
-    public Finalize(String bpid) {
-      super(DatanodeProtocol.DNA_FINALIZE);
-      blockPoolId = bpid;
-    }
-    public String getBlockPoolId() {
-      return blockPoolId;
-    }
-    public void readFields(DataInput in) throws IOException {
-      blockPoolId = WritableUtils.readString(in);
-    }
-    public void write(DataOutput out) throws IOException {
-      WritableUtils.writeString(out, blockPoolId);
-    }
-  }
-
-  static { // register a ctor
-    WritableFactories.setFactory(Register.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Register();}
-        });
-    WritableFactories.setFactory(Finalize.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Finalize();}
-        });
-  }
-
-  public static final DatanodeCommand REGISTER = new Register();
-
   public DatanodeCommand() {
     super();
   }
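The @Union list has to change here because Avro's reflect-based serialization cannot discover subclasses of an abstract type on its own: the annotation enumerates the closed set of concrete classes that may appear wherever DatanodeCommand is used, and two entries in that set were just replaced by top-level classes. A toy example of the pattern, unrelated to the HDFS classes:

import org.apache.avro.reflect.Union;

// The reflect schema generated for a field of type Message becomes a union
// of the listed concrete types (Void.class can be added to allow null).
@Union({Ping.class, Pong.class})
abstract class Message {
}

class Ping extends Message {
  int sequence;
}

class Pong extends Message {
  int sequence;
}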


@@ -0,0 +1,68 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;
/**
 * A FinalizeCommand is an instruction from the namenode to a datanode to
 * finalize the previous upgrade for the given block pool.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FinalizeCommand extends DatanodeCommand {
  // /////////////////////////////////////////
  // Writable
  // /////////////////////////////////////////
  static { // register a ctor
    WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
      public Writable newInstance() {
        return new FinalizeCommand();
      }
    });
  }

  String blockPoolId;

  private FinalizeCommand() {
    super(DatanodeProtocol.DNA_FINALIZE);
  }

  public FinalizeCommand(String bpid) {
    super(DatanodeProtocol.DNA_FINALIZE);
    blockPoolId = bpid;
  }

  public String getBlockPoolId() {
    return blockPoolId;
  }

  public void readFields(DataInput in) throws IOException {
    blockPoolId = WritableUtils.readString(in);
  }

  public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, blockPoolId);
  }
}
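The static block above is the usual Hadoop Writable factory registration: it lets generic framework code re-create a FinalizeCommand through the registered factory even though the no-argument constructor is private. A minimal round-trip sketch under that assumption, using only the public Writable and buffer APIs (the wrapper class is illustrative):

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableFactories;

class FinalizeCommandRoundTripSketch {
  static String roundTrip(String blockPoolId) throws IOException {
    // Serialize a command the way the RPC layer would.
    DataOutputBuffer out = new DataOutputBuffer();
    new FinalizeCommand(blockPoolId).write(out);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    // The factory registered in FinalizeCommand's static block supplies a
    // fresh instance; readFields then restores the block pool id.
    FinalizeCommand copy =
        (FinalizeCommand) WritableFactories.newInstance(FinalizeCommand.class);
    copy.readFields(in);
    return copy.getBlockPoolId();
  }
}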


@@ -0,0 +1,57 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.io.DataInput;
import java.io.DataOutput;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
/**
 * A RegisterCommand is an instruction to a datanode to register with the
 * namenode.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RegisterCommand extends DatanodeCommand {
  // /////////////////////////////////////////
  // Writable
  // /////////////////////////////////////////
  static { // register a ctor
    WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
      public Writable newInstance() {
        return new RegisterCommand();
      }
    });
  }

  public static final DatanodeCommand REGISTER = new RegisterCommand();

  public RegisterCommand() {
    super(DatanodeProtocol.DNA_REGISTER);
  }

  @Override
  public void readFields(DataInput in) { }

  @Override
  public void write(DataOutput out) { }
}
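RegisterCommand carries no payload (write and readFields are intentionally empty), which is why a single shared REGISTER instance can be placed in any heartbeat reply and why consumers compare action codes rather than object identity; the test in the final hunk below asserts exactly that. A short sketch of the equivalence, assuming nothing beyond the public API shown above:

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;

class RegisterCommandSketch {
  static void checkActionEquivalence() {
    DatanodeCommand shared = RegisterCommand.REGISTER;
    DatanodeCommand fresh = new RegisterCommand();
    // Distinct instances, same instruction: only the action code matters.
    assert shared.getAction() == fresh.getAction();
    assert shared.getAction() == DatanodeProtocol.DNA_REGISTER;
  }
}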


@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 
 import org.junit.After;
 import org.junit.Test;
@@ -128,7 +129,7 @@ public class TestDeadDatanode {
     // that asks datanode to register again
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
+    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
} }