HDFS-11680. Ozone: SCM CLI: Implement info container command. Contributed by Yuanbo Liu.

Weiwei Yang 2017-05-10 15:55:48 +08:00 committed by Owen O'Malley
parent 9fcaeceb54
commit 4be9710712
9 changed files with 317 additions and 5 deletions


@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.scm.client;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadContainerResponseProto;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.XceiverClientManager;
@@ -154,6 +156,35 @@ public void deleteContainer(Pipeline pipeline, boolean force)
}
}
/**
* Get meta data from an existing container.
*
* @param pipeline - pipeline that represents the container.
* @return ContainerData - a protobuf message that carries the basic
* info of a container.
* @throws IOException
*/
@Override
public ContainerData readContainer(Pipeline pipeline) throws IOException {
XceiverClientSpi client = null;
try {
client = xceiverClientManager.acquireClient(pipeline);
String traceID = UUID.randomUUID().toString();
ReadContainerResponseProto response =
ContainerProtocolCalls.readContainer(client,
pipeline.getContainerName(), traceID);
LOG.info("Read container {}, leader: {}, machines: {} ",
pipeline.getContainerName(),
pipeline.getLeader(),
pipeline.getMachines());
return response.getContainerData();
} finally {
if (client != null) {
xceiverClientManager.releaseClient(client);
}
}
}
/**
* Given an id, return the pipeline associated with the container.
* @param containerId - String Container ID
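
For orientation, a minimal usage sketch of the new client API. This is hypothetical caller code, not part of the commit: it assumes an already-constructed ScmClient (such as the implementation modified above), and the helper class and container name are illustrative.

import java.io.IOException;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

// Hypothetical helper: prints the basic meta data of a container
// through any ScmClient implementation.
public final class ReadContainerExample {
  public static void printContainerInfo(ScmClient scmClient, String containerName)
      throws IOException {
    // Resolve the pipeline for the container, then fetch its meta data.
    Pipeline pipeline = scmClient.getContainer(containerName);
    ContainerData data = scmClient.readContainer(pipeline);
    System.out.println("Name:  " + data.getName());
    System.out.println("State: " + (data.getOpen() ? "OPEN" : "CLOSED"));
  }
}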


@@ -18,6 +18,7 @@
package org.apache.hadoop.scm.client;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import java.io.IOException;
@@ -51,13 +52,22 @@ public interface ScmClient {
Pipeline getContainer(String containerId) throws IOException;
/**
* Deletes an existing container.
* @param pipeline - Pipeline that represents the container.
* @param force - true to forcibly delete the container.
* @throws IOException
*/
void deleteContainer(Pipeline pipeline, boolean force) throws IOException;
/**
* Read meta data from an existing container.
* @param pipeline - Pipeline that represents the container.
* @return ContainerData
* @throws IOException
*/
ContainerData readContainer(Pipeline pipeline) throws IOException;
/**
* Gets the container size -- Computed by SCM from Container Reports.
* @param pipeline - Pipeline


@@ -45,6 +45,10 @@
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Type;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
.WriteChunkRequestProto;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
.ReadContainerRequestProto;
import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
import java.io.IOException;
@@ -272,9 +276,35 @@ public static void deleteContainer(XceiverClientSpi client,
}
/**
* readContainer call that gets meta data from an existing container.
*
* @param client - client
* @param traceID - trace ID
* @throws IOException
*/
public static ReadContainerResponseProto readContainer(
XceiverClientSpi client, String containerName,
String traceID) throws IOException {
ReadContainerRequestProto.Builder readRequest =
ReadContainerRequestProto.newBuilder();
readRequest.setName(containerName);
readRequest.setPipeline(client.getPipeline().getProtobufMessage());
ContainerCommandRequestProto.Builder request =
ContainerCommandRequestProto.newBuilder();
request.setCmdType(Type.ReadContainer);
request.setReadContainer(readRequest);
request.setTraceID(traceID);
ContainerCommandResponseProto response =
client.sendCommand(request.build());
validateContainerResponse(response);
return response.getReadContainer();
}
/**
* Reads the data given the container name and key.
*
* @param client
* @param containerName - name of the container
* @param key - key
* @param traceID - trace ID
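
The same call can also be made at the protocol level. A rough sketch, assuming an XceiverClientSpi has already been acquired for the target pipeline (acquire/release against the XceiverClientManager is elided here; the client-side change above shows the full pattern). The helper class is hypothetical, and the package of ContainerProtocolCalls in the import is an assumption:

import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadContainerResponseProto;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; // assumed package

// Hypothetical helper: 'client' must already be bound to the
// container's pipeline.
final class ReadContainerSketch {
  static ContainerData read(XceiverClientSpi client) throws IOException {
    String traceID = UUID.randomUUID().toString();
    ReadContainerResponseProto response =
        ContainerProtocolCalls.readContainer(
            client, client.getPipeline().getContainerName(), traceID);
    return response.getContainerData();
  }
}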


@@ -102,10 +102,16 @@ public ContainerProtos.ContainerData getProtoBufMessage() {
builder.setDbPath(this.getDBPath());
}
if (this.getHash() != null) {
builder.setHash(this.getHash());
}
if (this.getContainerPath() != null) {
builder.setContainerPath(this.getContainerPath());
}
builder.setOpen(this.isOpen());
for (Map.Entry<String, String> entry : metadata.entrySet()) {
ContainerProtos.KeyValue.Builder keyValBuilder =
ContainerProtos.KeyValue.newBuilder();
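
With the two additions above, the hash (when set) and the open flag now survive conversion to the protobuf message. A small round-trip sketch follows; the container name and metadata are made up, and the import of the helpers ContainerData assumes its org.apache.hadoop.ozone.container.common.helpers package:

import java.io.IOException;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData; // assumed package

// Hypothetical round-trip check, not part of this commit.
final class ProtoRoundTripSketch {
  static void check() throws IOException {
    ContainerData data = new ContainerData("container-1");
    data.addMetadata("owner", "bilbo");
    ContainerProtos.ContainerData proto = data.getProtoBufMessage();
    // Before this change, the open flag was not serialized at all.
    assert proto.getOpen() == data.isOpen();
  }
}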


@@ -27,7 +27,7 @@
* The protobuf counterpart of this class looks like this.
* message ContainerInfo {
* required string containerName = 1;
* required string finalhash = 2;
* optional int64 size = 3;
* optional int64 keycount = 4;
* }


@@ -33,6 +33,8 @@
.CreateContainerHandler.CONTAINER_CREATE;
import static org.apache.hadoop.ozone.scm.cli.container
.DeleteContainerHandler.CONTAINER_DELETE;
import static org.apache.hadoop.ozone.scm.cli.container
.InfoContainerHandler.CONTAINER_INFO;
/**
* The handler class of container-specific commands, e.g. createContainer.
@@ -57,6 +59,8 @@ public void execute(CommandLine cmd) throws IOException {
handler = new CreateContainerHandler(getScmClient());
} else if (cmd.hasOption(CONTAINER_DELETE)) {
handler = new DeleteContainerHandler(getScmClient());
} else if (cmd.hasOption(CONTAINER_INFO)) {
handler = new InfoContainerHandler(getScmClient());
}
// execute the sub command, throw exception if no sub command found
@@ -87,11 +91,14 @@ public void displayHelp() {
private static void addCommandsOption(Options options) {
Option createContainer =
new Option(CONTAINER_CREATE, false, "Create container");
Option infoContainer =
new Option(CONTAINER_INFO, true, "Info container");
Option deleteContainer =
new Option(CONTAINER_DELETE, true, "Delete container");
options.addOption(createContainer);
options.addOption(deleteContainer);
options.addOption(infoContainer);
// TODO : add other options such as delete, close etc.
}
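
With the new option wired into addCommandsOption and execute(), the subcommand is invoked the same way as create/delete; per the help text in InfoContainerHandler below, the expected invocation is:

hdfs scm -container -info <container name>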


@@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.scm.cli.container;
import com.google.common.base.Preconditions;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.stream.Collectors;
/**
* This is the handler that processes the container info command.
*/
public class InfoContainerHandler extends OzoneCommandHandler {
public static final String CONTAINER_INFO = "info";
/**
* Constructs a handler object.
*
* @param scmClient scm client.
*/
public InfoContainerHandler(ScmClient scmClient) {
super(scmClient);
}
@Override
public void execute(CommandLine cmd) throws IOException {
if (!cmd.hasOption(CONTAINER_INFO)) {
throw new IOException("Expecting container info");
}
String containerName = cmd.getOptionValue(CONTAINER_INFO);
Pipeline pipeline = getScmClient().getContainer(containerName);
Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
ContainerData containerData =
getScmClient().readContainer(pipeline);
// Print container report info.
logOut("Container Name: %s",
containerData.getName());
String openStatus = containerData.getOpen() ? "OPEN" : "CLOSED";
logOut("Container State: %s", openStatus);
if (!containerData.getHash().isEmpty()) {
logOut("Container Hash: %s", containerData.getHash());
}
logOut("Container DB Path: %s", containerData.getDbPath());
logOut("Container Path: %s", containerData.getContainerPath());
// Output meta data.
String metadataStr = containerData.getMetadataList().stream().map(
p -> p.getKey() + ":" + p.getValue()).collect(Collectors.joining(", "));
logOut("Container Metadata: {%s}", metadataStr);
// Print pipeline of an existing container.
logOut("LeaderID: %s", pipeline.getLeader().getHostName());
String machinesStr = pipeline.getMachines().stream().map(
DatanodeID::getHostName).collect(Collectors.joining(","));
logOut("Datanodes: [%s]", machinesStr);
}
@Override
public void displayHelp() {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs scm -container -info <container name>",
new Options());
}
}
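
Putting it together, the handler's output takes the following shape. The field order and format strings are the ones asserted in TestSCMCli below; the paths and hostname here are illustrative only (and the "Container Hash" line appears only when the hash is non-empty):

Container Name: ContainerTestInfo3
Container State: OPEN
Container DB Path: /tmp/container/ContainerTestInfo3/db
Container Path: /tmp/container/ContainerTestInfo3
Container Metadata: {VOLUME:shire, owner:bilbo}
LeaderID: datanode-0.example.com
Datanodes: [datanode-0.example.com]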


@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.cblock.util;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
@@ -60,6 +61,21 @@ public void deleteContainer(Pipeline pipeline, boolean force)
}
/**
* Creates an instance of ContainerData for a given container id;
* since this is a testing class, there is no need to set up the whole
* env to get the meta data of the container.
* @param pipeline
* @return
* @throws IOException
*/
@Override
public ContainerData readContainer(Pipeline pipeline) throws IOException {
return ContainerData.newBuilder()
.setName(pipeline.getContainerName())
.build();
}
/**
* Return reference to an *existing* container with given ID.
*


@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.scm;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfiguration;
@@ -30,14 +31,19 @@
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.util.StringUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.List;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@@ -63,6 +69,9 @@ public class TestSCMCli {
private static ByteArrayOutputStream errContent;
private static PrintStream errStream;
@Rule
public Timeout globalTimeout = new Timeout(30000);
@BeforeClass
public static void setup() throws Exception {
conf = new OzoneConfiguration();
@@ -214,6 +223,116 @@ public void testDeleteContainer() throws Exception {
.contains("Specified key does not exist."));
}
@Test
public void testInfoContainer() throws Exception {
// The cluster has one Datanode server.
DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
String formatStr =
"Container Name: %s\n" +
"Container State: %s\n" +
"Container DB Path: %s\n" +
"Container Path: %s\n" +
"Container Metadata: {%s}\n" +
"LeaderID: %s\n" +
"Datanodes: [%s]\n";
String formatStrWithHash =
"Container Name: %s\n" +
"Container State: %s\n" +
"Container Hash: %s\n" +
"Container DB Path: %s\n" +
"Container Path: %s\n" +
"Container Metadata: {%s}\n" +
"LeaderID: %s\n" +
"Datanodes: [%s]\n";
// Test a non-existent container.
String cname = "nonExistContainer";
String[] info = {"-container", "-info", cname};
int exitCode = runCommandAndGetOutput(info, null, null);
assertEquals(ResultCode.EXECUTION_ERROR, exitCode);
// Create an empty container.
cname = "ContainerTestInfo1";
Pipeline pipeline = scm.allocateContainer(cname);
ContainerData data = new ContainerData(cname);
containerManager.createContainer(pipeline, data);
info = new String[]{"-container", "-info", cname};
ByteArrayOutputStream out = new ByteArrayOutputStream();
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
String expected = String.format(formatStr, cname, openStatus,
data.getDBPath(), data.getContainerPath(), "",
datanodeID.getHostName(), datanodeID.getHostName());
assertEquals(expected, out.toString());
out.reset();
// Create a non-empty container.
cname = "ContainerTestInfo2";
pipeline = scm.allocateContainer(cname);
data = new ContainerData(cname);
containerManager.createContainer(pipeline, data);
KeyUtils.getDB(data, conf).put(cname.getBytes(),
"someKey".getBytes());
info = new String[]{"-container", "-info", cname};
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String.format(formatStr, cname, openStatus,
data.getDBPath(), data.getContainerPath(), "",
datanodeID.getHostName(), datanodeID.getHostName());
assertEquals(expected, out.toString());
out.reset();
// Create a container with some meta data.
cname = "ContainerTestInfo3";
pipeline = scm.allocateContainer(cname);
data = new ContainerData(cname);
data.addMetadata("VOLUME", "shire");
data.addMetadata("owner", "bilbo");
containerManager.createContainer(pipeline, data);
KeyUtils.getDB(data, conf).put(cname.getBytes(),
"someKey".getBytes());
List<String> metaList = data.getAllMetadata().entrySet().stream()
.map(entry -> entry.getKey() + ":" + entry.getValue())
.collect(Collectors.toList());
String metadataStr = StringUtils.join(", ", metaList);
info = new String[]{"-container", "-info", cname};
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String.format(formatStr, cname, openStatus,
data.getDBPath(), data.getContainerPath(), metadataStr,
datanodeID.getHostName(), datanodeID.getHostName());
assertEquals(expected, out.toString());
out.reset();
// Close last container and test info again.
containerManager.closeContainer(cname);
info = new String[]{"-container", "-info", cname};
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
data = containerManager.readContainer(cname);
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String.format(formatStrWithHash, cname, openStatus,
data.getHash(), data.getDBPath(), data.getContainerPath(),
metadataStr, datanodeID.getHostName(), datanodeID.getHostName());
assertEquals(expected, out.toString());
}
@Test
public void testNonExistCommand() throws Exception {
PrintStream init = System.out;
@@ -255,8 +374,10 @@ public void testHelp() throws Exception {
String expected1 =
"usage: hdfs scm -container <commands> <options>\n" +
"where <commands> can be one of the following\n" +
" -create Create container\n" +
" -del <arg> Delete container\n" +
" -info <arg> Info container\n";
assertEquals(expected1, testContent.toString());
testContent.reset();