HDFS-6076. DataNode with SimulatedDataSet should not create DatanodeRegistration with namenode layout version and namenode node type.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1575452 13f79535-47bb-0310-9956-ffa450edef68
parent 90b399c4bd
commit 1f6c2b09c6
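In the SimulatedDataSet case the DataNode has no on-disk block-pool storage, so the old code built the registration's StorageInfo by copying the NamespaceInfo received from the NameNode, which carries the NameNode's layout version and node type. The hunks below construct the StorageInfo from DataNode-side constants instead. A minimal sketch of the corrected construction, assuming the standard Hadoop packages for the classes named in the diff; the wrapper class here is illustrative only, not the actual DataNode code path:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

class RegistrationStorageInfoSketch {
  // Sketch only. Old behaviour: new StorageInfo(nsInfo) copied every field of
  // the NameNode-supplied NamespaceInfo, including its layout version and node
  // type. Fixed behaviour: keep only the namespace identity and fill in the
  // DataNode's own layout version and node type.
  static StorageInfo forSimulatedDataset(NamespaceInfo nsInfo) {
    return new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }
}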
@@ -394,6 +394,10 @@ BREAKDOWN OF HDFS-5535 ROLLING UPGRADE SUBTASKS AND RELATED JIRAS
 
     HDFS-6060. NameNode should not check DataNode layout version (brandonli)
 
+    HDFS-6076. DataNode with SimulatedDataSet should not create
+    DatanodeRegistration with namenode layout version and namenode node type.
+    (szetszwo)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -329,7 +329,7 @@ class BPOfferService {
     }
   }
 
-  synchronized DatanodeRegistration createRegistration() throws IOException {
+  synchronized DatanodeRegistration createRegistration() {
     Preconditions.checkState(bpNSInfo != null,
         "getRegistration() can only be called after initial handshake");
     return dn.createBPRegistration(bpNSInfo);
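With the IOException clause gone from createRegistration(), callers that only need the registration object no longer have to declare or catch a checked exception. A hedged caller-side sketch; the wrapper class and method are hypothetical, and are placed in the datanode package because BPOfferService is package-private:

package org.apache.hadoop.hdfs.server.datanode;

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

class CreateRegistrationCallerSketch {
  // Hypothetical caller. The BPOfferService must have completed its initial
  // handshake so that the Preconditions check in createRegistration() passes.
  static DatanodeRegistration register(BPOfferService bpos) {
    return bpos.createRegistration();  // no try/catch or throws IOException needed any more
  }
}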
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hdfs.protocolPB.*;
 import org.apache.hadoop.hdfs.security.token.block.*;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -88,6 +90,7 @@ import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.mortbay.util.ajax.JSON;
 
 import javax.management.ObjectName;
 
 import java.io.*;
 import java.lang.management.ManagementFactory;
 import java.net.*;
@@ -771,12 +774,14 @@ public class DataNode extends Configured
    * Create a DatanodeRegistration for a specific block pool.
    * @param nsInfo the namespace info from the first part of the NN handshake
    */
-  DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo)
-      throws IOException {
+  DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
     StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
     if (storageInfo == null) {
       // it's null in the case of SimulatedDataSet
-      storageInfo = new StorageInfo(nsInfo);
+      storageInfo = new StorageInfo(
+          DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
+          nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
+          NodeType.DATA_NODE);
     }
 
     DatanodeID dnId = new DatanodeID(
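The net effect for the SimulatedDataSet case is that the registration now advertises the DataNode's own layout version and node type. A hedged sketch of that expectation, not a test from this patch; getStorageInfo(), getLayoutVersion() and getStorageType() are assumed accessor names, while the classes and constants come from the hunks above:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

class SimulatedRegistrationExpectationSketch {
  // With SimulatedDataSet, storage.getBPStorage(...) returns null, so the fixed
  // createBPRegistration() builds the StorageInfo from datanode-side constants;
  // the registration should therefore report the datanode layout version and
  // the DATA_NODE node type. Accessor names are assumptions, see the lead-in.
  static void verify(DatanodeRegistration reg) {
    assertEquals(DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        reg.getStorageInfo().getLayoutVersion());
    assertEquals(NodeType.DATA_NODE, reg.getStorageInfo().getStorageType());
  }
}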