HDDS-16. Remove Pipeline from Datanode Container Protocol protobuf definition. Contributed by Mukul Kumar Singh.

Xiaoyu Yao 2018-05-10 14:49:58 -07:00
parent 48d0b54849
commit 7369f41020
12 changed files with 85 additions and 68 deletions

View File

@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;

 import java.io.IOException;
@@ -85,7 +85,7 @@ public ChunkOutputStream(BlockID blockID, String key,
     KeyValue keyValue = KeyValue.newBuilder()
         .setKey("TYPE").setValue("KEY").build();
     this.containerKeyData = KeyData.newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .addMetadata(keyValue);
     this.xceiverClientManager = xceiverClientManager;
     this.xceiverClient = xceiverClient;

View File

@@ -17,6 +17,7 @@
 package org.apache.hadoop.hdds.client;

 import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

 /**
@@ -56,4 +57,15 @@ public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
     return new BlockID(blockID.getContainerID(),
         blockID.getLocalID());
   }
+
+  public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
+    return ContainerProtos.DatanodeBlockID.newBuilder()
+        .setContainerID(containerID).setLocalID(localID).build();
+  }
+
+  public static BlockID getFromProtobuf(ContainerProtos.DatanodeBlockID blockID) {
+    return new BlockID(blockID.getContainerID(),
+        blockID.getLocalID());
+  }
 }
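Note: a minimal round-trip sketch of the two helpers added above, assuming BlockID exposes getContainerID()/getLocalID() accessors and a BlockID(long, long) constructor as the surrounding code suggests; the harness class itself is illustrative, not part of the commit.

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;

public class BlockIDRoundTripSketch {
  public static void main(String[] args) {
    BlockID original = new BlockID(1L, 42L);
    // Serialize with the new datanode-scoped message rather than the
    // SCM-side HddsProtos.BlockID.
    ContainerProtos.DatanodeBlockID proto =
        original.getDatanodeBlockIDProtobuf();
    // Deserialize through the new overload.
    BlockID restored = BlockID.getFromProtobuf(proto);
    System.out.println(restored.getContainerID() == proto.getContainerID()
        && restored.getLocalID() == proto.getLocalID()); // prints: true
  }
}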

View File

@@ -50,7 +50,7 @@
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;

 import java.io.IOException;
@@ -133,7 +133,7 @@ public static ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient,
       ChunkInfo chunk, BlockID blockID, String traceID) throws IOException {
     ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(chunk);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
     ContainerCommandRequestProto request = ContainerCommandRequestProto
@@ -163,7 +163,7 @@ public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
       throws IOException {
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(chunk)
         .setData(data);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
@@ -195,7 +195,7 @@ public static void writeSmallFile(XceiverClientSpi client,
       throws IOException {
     KeyData containerKeyData =
-        KeyData.newBuilder().setBlockID(blockID.getProtobuf())
+        KeyData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
             .build();
     PutKeyRequestProto.Builder createKeyRequest =
         PutKeyRequestProto.newBuilder()
@@ -241,7 +241,6 @@ public static void createContainer(XceiverClientSpi client, long containerID,
     ContainerProtos.ContainerData.Builder containerData = ContainerProtos
         .ContainerData.newBuilder();
     containerData.setContainerID(containerID);
-    createRequest.setPipeline(client.getPipeline().getProtobufMessage());
     createRequest.setContainerData(containerData.build());
     String id = client.getPipeline().getLeader().getUuidString();
@@ -321,7 +320,6 @@ public static ReadContainerResponseProto readContainer(
     ReadContainerRequestProto.Builder readRequest =
         ReadContainerRequestProto.newBuilder();
     readRequest.setContainerID(containerID);
-    readRequest.setPipeline(client.getPipeline().getProtobufMessage());
     String id = client.getPipeline().getLeader().getUuidString();
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -348,7 +346,7 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
       BlockID blockID, String traceID) throws IOException {
     KeyData containerKeyData = KeyData
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .build();
     GetKeyRequestProto.Builder getKey = GetKeyRequestProto
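Note: every call above now follows the same pattern. The request carries a DatanodeBlockID (or a bare container ID) and the target datanode is resolved client-side from the pipeline's leader, instead of serializing the whole Pipeline into the message. A minimal sketch of the post-change read-chunk shape, using only identifiers visible in this diff (the wrapper class and method are illustrative):

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ReadChunkRequestProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;

final class ReadChunkSketch {
  static ReadChunkRequestProto.Builder newReadChunkRequest(
      XceiverClientSpi xceiverClient, BlockID blockID,
      ContainerProtos.ChunkInfo chunk) {
    // The block is now identified by the datanode-scoped message.
    ReadChunkRequestProto.Builder readChunkRequest =
        ReadChunkRequestProto.newBuilder()
            .setBlockID(blockID.getDatanodeBlockIDProtobuf())
            .setChunkData(chunk);
    // The pipeline still tells the client where to send the command...
    String leaderUuid =
        xceiverClient.getPipeline().getLeader().getUuidString();
    // ...but it is no longer embedded in the request itself.
    return readChunkRequest; // wrapped into a ContainerCommandRequestProto
  }
}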

View File

@@ -20,7 +20,6 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

 import java.io.IOException;
 import java.util.Map;
@@ -111,8 +110,8 @@ public ContainerProtos.ChunkInfo getProtoBufMessage() {
     }
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      HddsProtos.KeyValue.Builder keyValBuilder =
-          HddsProtos.KeyValue.newBuilder();
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }

View File

@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.common.helpers;

 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.client.BlockID;

 import java.io.IOException;
@@ -76,11 +75,11 @@ public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws
   public ContainerProtos.KeyData getProtoBufMessage() {
     ContainerProtos.KeyData.Builder builder =
         ContainerProtos.KeyData.newBuilder();
-    builder.setBlockID(this.blockID.getProtobuf());
+    builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
     builder.addAllChunks(this.chunks);
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      HddsProtos.KeyValue.Builder keyValBuilder =
-          HddsProtos.KeyValue.newBuilder();
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }

View File

@@ -27,9 +27,7 @@
 option java_package = "org.apache.hadoop.hdds.protocol.proto";
 option java_outer_classname = "ContainerProtos";
 option java_generate_equals_and_hash = true;
-package hadoop.hdds;
+package hadoop.hdds.datanode;
-import "hdfs.proto";
-import "hdds.proto";

 /**
  * Commands that are used to manipulate the state of containers on a datanode.
@@ -134,6 +132,28 @@ enum Result {
   CLOSED_CONTAINER_RETRY = 27;
 }

+/**
+ * Block ID that uniquely identifies a block in a datanode.
+ */
+message DatanodeBlockID {
+  required int64 containerID = 1;
+  required int64 localID = 2;
+}
+
+message KeyValue {
+  required string key = 1;
+  optional string value = 2;
+}
+
+/**
+ * Lifecycle states of a container in a datanode.
+ */
+enum ContainerLifeCycleState {
+  OPEN = 1;
+  CLOSING = 2;
+  CLOSED = 3;
+}
+
 message ContainerCommandRequestProto {
   required Type cmdType = 1; // Type of the command
@@ -205,7 +225,7 @@ message ContainerData {
   optional int64 bytesUsed = 6;
   optional int64 size = 7;
   optional int64 keyCount = 8;
-  optional LifeCycleState state = 9 [default = OPEN];
+  optional ContainerLifeCycleState state = 9 [default = OPEN];
 }

 message ContainerMeta {
@@ -215,26 +235,23 @@ message ContainerMeta {
 // Container Messages.
 message CreateContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required ContainerData containerData = 2;
+  required ContainerData containerData = 1;
 }

 message CreateContainerResponseProto {
 }

 message ReadContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required int64 containerID = 2;
+  required int64 containerID = 1;
 }

 message ReadContainerResponseProto {
-  optional ContainerData containerData = 2;
+  optional ContainerData containerData = 1;
 }

 message UpdateContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required ContainerData containerData = 2;
-  optional bool forceUpdate = 3 [default = false];
+  required ContainerData containerData = 1;
+  optional bool forceUpdate = 2 [default = false];
 }

 message UpdateContainerResponseProto {
@@ -262,12 +279,12 @@ message CloseContainerRequestProto {
 }

 message CloseContainerResponseProto {
-  optional string hash = 2;
-  optional int64 containerID = 3;
+  optional string hash = 1;
+  optional int64 containerID = 2;
 }

 message KeyData {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   optional int64 flags = 2; // for future use.
   repeated KeyValue metadata = 3;
   repeated ChunkInfo chunks = 4;
@@ -291,7 +308,7 @@ message GetKeyResponseProto {
 message DeleteKeyRequestProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
 }

 message DeleteKeyResponseProto {
@@ -300,7 +317,7 @@ message DeleteKeyResponseProto {
 message ListKeyRequestProto {
   required int64 containerID = 1;
   optional int64 startLocalID = 2;
-  required uint32 count = 4;
+  required uint32 count = 3;
 }
@@ -325,7 +342,7 @@ enum Stage {
 }

 message WriteChunkRequestProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
   optional bytes data = 3;
   optional Stage stage = 4 [default = COMBINED];
@@ -335,26 +352,26 @@ message WriteChunkResponseProto {
 }

 message ReadChunkRequestProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
 }

 message ReadChunkResponseProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
   required bytes data = 3;
 }

 message DeleteChunkRequestProto {
-  required BlockID blockID = 1;
-  required ChunkInfo chunkData = 3;
+  required DatanodeBlockID blockID = 1;
+  required ChunkInfo chunkData = 2;
 }

 message DeleteChunkResponseProto {
 }

 message ListChunkRequestProto {
-  required BlockID blockID = 1;
+  required DatanodeBlockID blockID = 1;
   required string prevChunkName = 2;
   required uint32 count = 3;
 }
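Note: after protoc regenerates ContainerProtos, the three new datanode-local types behave like any other generated protobuf classes. A short sketch with illustrative values:

import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;

final class DatanodeProtoSketch {
  public static void main(String[] args) {
    // Both fields of DatanodeBlockID are required.
    ContainerProtos.DatanodeBlockID blockID =
        ContainerProtos.DatanodeBlockID.newBuilder()
            .setContainerID(5L)
            .setLocalID(7L)
            .build();

    // KeyValue: key is required, value is optional.
    ContainerProtos.KeyValue kv = ContainerProtos.KeyValue.newBuilder()
        .setKey("TYPE").setValue("KEY").build();

    // Containers now carry their own lifecycle enum, decoupled from the
    // SCM-side HddsProtos.LifeCycleState.
    ContainerProtos.ContainerLifeCycleState state =
        ContainerProtos.ContainerLifeCycleState.OPEN;

    System.out.println(blockID.getContainerID() + "/" + blockID.getLocalID()
        + " " + kv.getKey() + "=" + kv.getValue() + " " + state);
  }
}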

View File

@@ -22,7 +22,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
@@ -48,7 +49,7 @@ public class ContainerData {
   private AtomicLong bytesUsed;
   private long maxSize;
   private long containerID;
-  private HddsProtos.LifeCycleState state;
+  private ContainerLifeCycleState state;

   /**
    * Constructs a ContainerData Object.
@@ -63,7 +64,7 @@ public ContainerData(long containerID,
         ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB;
     this.bytesUsed = new AtomicLong(0L);
     this.containerID = containerID;
-    this.state = HddsProtos.LifeCycleState.OPEN;
+    this.state = ContainerLifeCycleState.OPEN;
   }

   /**
@@ -133,8 +134,8 @@ public ContainerProtos.ContainerData getProtoBufMessage() {
     builder.setState(this.getState());
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      HddsProtos.KeyValue.Builder keyValBuilder =
-          HddsProtos.KeyValue.newBuilder();
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }
@@ -250,11 +251,11 @@ public synchronized long getContainerID() {
     return containerID;
   }

-  public synchronized void setState(HddsProtos.LifeCycleState state) {
+  public synchronized void setState(ContainerLifeCycleState state) {
     this.state = state;
   }

-  public synchronized HddsProtos.LifeCycleState getState() {
+  public synchronized ContainerLifeCycleState getState() {
     return this.state;
   }
@@ -263,7 +264,7 @@ public synchronized HddsProtos.LifeCycleState getState() {
    * @return - boolean
    */
   public synchronized boolean isOpen() {
-    return HddsProtos.LifeCycleState.OPEN == state;
+    return ContainerLifeCycleState.OPEN == state;
   }

   /**
@@ -271,7 +272,7 @@ public synchronized boolean isOpen() {
    */
   public synchronized void closeContainer() {
     // TODO: closed or closing here
-    setState(HddsProtos.LifeCycleState.CLOSED);
+    setState(ContainerLifeCycleState.CLOSED);
     // Something brain-dead for now: name + timestamp of when we get the close
     // container message.
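Note: the state machine itself is unchanged; only the enum type moved into ContainerProtos. A hedged sketch of the lifecycle through the public methods above (the Configuration parameter of the ContainerData constructor is assumed from the truncated signature in this hunk):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerLifeCycleState;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;

final class ContainerLifeCycleSketch {
  public static void main(String[] args) {
    ContainerData data = new ContainerData(1L, new Configuration());
    System.out.println(data.isOpen());  // true: new containers start OPEN
    data.closeContainer();              // per the TODO, goes straight to CLOSED
    System.out.println(data.getState() == ContainerLifeCycleState.CLOSED); // true
  }
}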

View File

@@ -21,7 +21,6 @@
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
@@ -393,10 +392,6 @@ private ContainerCommandResponseProto handleCreateContainer(
         msg.getCreateContainer().getContainerData(), conf);
     Preconditions.checkNotNull(cData, "Container data is null");
-    Pipeline pipeline = Pipeline.getFromProtoBuf(
-        msg.getCreateContainer().getPipeline());
-    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
     this.containerManager.createContainer(cData);
     return ContainerUtils.getContainerResponse(msg);
   }

View File

@@ -27,7 +27,8 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerLifeCycleState;

 import java.io.IOException;
 import java.util.stream.Collectors;
@@ -77,7 +78,7 @@ public void execute(CommandLine cmd) throws IOException {
     // Print container report info.
     logOut("Container id: %s", containerID);
     String openStatus =
-        containerData.getState() == HddsProtos.LifeCycleState.OPEN ? "OPEN" :
+        containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
             "CLOSED";
     logOut("Container State: %s", openStatus);
     if (!containerData.getHash().isEmpty()) {

View File

@@ -38,7 +38,7 @@ final class OzoneContainerTranslation {
   public static KeyData containerKeyDataForRead(BlockID blockID) {
     return KeyData
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .build();
   }

View File

@@ -28,11 +28,10 @@
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -204,7 +203,7 @@ public static ContainerCommandRequestProto getWriteChunkRequest(
     Pipeline newPipeline =
         new Pipeline(pipeline.getPipelineChannel());
-    writeRequest.setBlockID(blockID.getProtobuf());
+    writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());

     byte[] data = getData(datalen);
     ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
@@ -361,7 +360,6 @@ public static ContainerCommandRequestProto getCreateContainerRequest(
         .ContainerData.newBuilder();
     containerData.setContainerID(containerID);
     createRequest.setContainerData(containerData.build());
-    createRequest.setPipeline(pipeline.getProtobufMessage());

     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -399,7 +397,6 @@ public static ContainerCommandRequestProto getUpdateContainerRequest(
     }
     Pipeline pipeline =
         ContainerTestHelper.createSingleNodePipeline();
-    updateRequestBuilder.setPipeline(pipeline.getProtobufMessage());
     updateRequestBuilder.setContainerData(containerData.build());

     ContainerCommandRequestProto.Builder request =
@@ -469,7 +466,8 @@ public static ContainerCommandRequestProto getPutKeyRequest(
    */
   public static ContainerCommandRequestProto getKeyRequest(
       Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    HddsProtos.BlockID blockID = putKeyRequest.getKeyData().getBlockID();
+    ContainerProtos.DatanodeBlockID blockID =
+        putKeyRequest.getKeyData().getBlockID();
     LOG.trace("getKey: blockID={}", blockID);
     ContainerProtos.GetKeyRequestProto.Builder getRequest =

View File

@@ -32,7 +32,6 @@
 import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
 import org.apache.hadoop.util.Time;
 import org.openjdk.jmh.annotations.Benchmark;
@@ -168,8 +167,6 @@ public void cleanup() throws IOException {
   private ContainerCommandRequestProto getCreateContainerCommand(long containerID) {
     CreateContainerRequestProto.Builder createRequest =
         CreateContainerRequestProto.newBuilder();
-    createRequest.setPipeline(
-        new Pipeline(pipelineChannel).getProtobufMessage());
     createRequest.setContainerData(
         ContainerData.newBuilder().setContainerID(
             containerID).build());
@@ -187,7 +184,7 @@ private ContainerCommandRequestProto getWriteChunkCommand(
       BlockID blockID, String chunkName) {
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(getChunkInfo(blockID, chunkName))
         .setData(data);
@@ -204,7 +201,7 @@ private ContainerCommandRequestProto getReadChunkCommand(
       BlockID blockID, String chunkName) {
     ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(getChunkInfo(blockID, chunkName));
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
@@ -258,7 +255,7 @@ private ContainerProtos.KeyData getKeyData(
       BlockID blockID, String chunkKey) {
     ContainerProtos.KeyData.Builder builder = ContainerProtos.KeyData
         .newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .addChunks(getChunkInfo(blockID, chunkKey));
     return builder.build();
   }
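Note: with the pipeline argument gone, a create-container command reduces to the container data alone, as the benchmark now shows. A minimal sketch using only identifiers visible in this diff (the wrapper class and method are illustrative):

import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.CreateContainerRequestProto;

final class CreateContainerSketch {
  // Only the container ID is needed now; no Pipeline is attached.
  static CreateContainerRequestProto newCreateContainerRequest(long containerID) {
    return CreateContainerRequestProto.newBuilder()
        .setContainerData(ContainerProtos.ContainerData.newBuilder()
            .setContainerID(containerID)
            .build())
        .build();
  }
}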