HDFS-11675. Ozone: SCM CLI: Implement delete container command. Contributed by Weiwei Yang.

Anu Engineer 2017-05-01 09:26:35 -07:00
parent 50dd3a5cfa
commit c3f397eead
18 changed files with 338 additions and 30 deletions

File: ContainerOperationClient.java

@@ -82,9 +82,11 @@ public class ContainerOperationClient implements ScmClient {
       client = xceiverClientManager.acquireClient(pipeline);
       String traceID = UUID.randomUUID().toString();
       ContainerProtocolCalls.createContainer(client, traceID);
-      LOG.info("Created container " + containerId +
-          " leader:" + pipeline.getLeader() +
-          " machines:" + pipeline.getMachines());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Created container " + containerId
+            + " leader:" + pipeline.getLeader()
+            + " machines:" + pipeline.getMachines());
+      }
       return pipeline;
     } finally {
       if (client != null) {
@@ -128,11 +130,26 @@ public class ContainerOperationClient implements ScmClient {
   /**
    * Delete the container, this will release any resource it uses.
    * @param pipeline - Pipeline that represents the container.
+   * @param force - True to forcibly delete the container.
    * @throws IOException
    */
   @Override
-  public void deleteContainer(Pipeline pipeline) throws IOException {
-    // TODO
+  public void deleteContainer(Pipeline pipeline, boolean force)
+      throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      client = xceiverClientManager.acquireClient(pipeline);
+      String traceID = UUID.randomUUID().toString();
+      ContainerProtocolCalls.deleteContainer(client, force, traceID);
+      LOG.info("Deleted container {}, leader: {}, machines: {} ",
+          pipeline.getContainerName(),
+          pipeline.getLeader(),
+          pipeline.getMachines());
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
   }

   /**
@@ -144,8 +161,7 @@ public class ContainerOperationClient implements ScmClient {
   @Override
   public Pipeline getContainer(String containerId) throws
       IOException {
-    // TODO
-    return null;
+    return storageContainerLocationClient.getContainer(containerId);
   }

   /**

File: ScmClient.java

@@ -53,9 +53,10 @@ public interface ScmClient {
   /**
    * Delets an existing container.
    * @param pipeline - Pipeline that represents the container.
+   * @param force - true to forcibly delete the container.
    * @throws IOException
    */
-  void deleteContainer(Pipeline pipeline) throws IOException;
+  void deleteContainer(Pipeline pipeline, boolean force) throws IOException;

   /**
    * Gets the container size -- Computed by SCM from Container Reports.
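Usage sketch (not part of the patch): with the updated interface, a caller first resolves the container's Pipeline from SCM and then passes it to deleteContainer together with the force flag, which is the pattern DeleteContainerHandler and the cblock StorageManager follow later in this commit. The class and helper names below are illustrative only.

import java.io.IOException;

import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

public final class ScmDeleteExample {

  // Hypothetical helper, not part of this commit.
  static void deleteByName(ScmClient scmClient, String containerName,
      boolean force) throws IOException {
    // Ask SCM which pipeline (group of datanodes) hosts the container.
    Pipeline pipeline = scmClient.getContainer(containerName);
    if (pipeline == null) {
      throw new IOException("Container does not exist: " + containerName);
    }
    // force=true asks the datanode to drop a non-empty container; per
    // TestSCMCli below, the container still has to be closed first.
    scmClient.deleteContainer(pipeline, force);
  }

  private ScmDeleteExample() {
  }
}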

File: StorageContainerLocationProtocol.java

@@ -62,4 +62,14 @@ public interface StorageContainerLocationProtocol {
   Pipeline allocateContainer(String containerName,
       ScmClient.ReplicationFactor replicationFactor) throws IOException;

+  /**
+   * Ask SCM the location of the container. SCM responds with a group of
+   * nodes where this container and its replicas are located.
+   *
+   * @param containerName - Name of the container.
+   * @return Pipeline - the pipeline where container locates.
+   * @throws IOException
+   */
+  Pipeline getContainer(String containerName) throws IOException;
+
 }

File: StorageContainerLocationProtocolClientSideTranslatorPB.java

@@ -35,6 +35,8 @@
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsRequestProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsResponseProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.LocatedContainerProto;
+import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
+import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;

 import java.io.Closeable;
@@ -146,6 +148,24 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
     return Pipeline.getFromProtoBuf(response.getPipeline());
   }

+  public Pipeline getContainer(String containerName) throws IOException {
+    Preconditions.checkNotNull(containerName,
+        "Container Name cannot be Null");
+    Preconditions.checkState(!containerName.isEmpty(),
+        "Container name cannot be empty");
+    GetContainerRequestProto request = GetContainerRequestProto
+        .newBuilder()
+        .setContainerName(containerName)
+        .build();
+    try {
+      GetContainerResponseProto response =
+          rpcProxy.getContainer(NULL_RPC_CONTROLLER, request);
+      return Pipeline.getFromProtoBuf(response.getPipeline());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public Object getUnderlyingProxyObject() {
     return rpcProxy;

File: ContainerProtocolCalls.java

@@ -245,6 +245,32 @@ public final class ContainerProtocolCalls {
     validateContainerResponse(response);
   }

+  /**
+   * Deletes a container from a pipeline.
+   *
+   * @param client
+   * @param force whether or not to forcibly delete the container.
+   * @param traceID
+   * @throws IOException
+   */
+  public static void deleteContainer(XceiverClientSpi client,
+      boolean force, String traceID) throws IOException {
+    ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
+        ContainerProtos.DeleteContainerRequestProto.newBuilder();
+    deleteRequest.setName(client.getPipeline().getContainerName());
+    deleteRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    deleteRequest.setForceDelete(force);
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.DeleteContainer);
+    request.setDeleteContainer(deleteRequest);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+  }
+
   /**
    * Reads the data given the container name and key.
    *
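Calling sketch (not part of the patch): the new static helper is driven against an XceiverClientSpi acquired from an XceiverClientManager, mirroring what ContainerOperationClient does in the first file above. The import paths for XceiverClientSpi and ContainerProtocolCalls are assumed from the surrounding code and may differ.

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;

public final class DirectDeleteExample {

  // Hypothetical helper, not part of this commit.
  static void deleteViaDatanode(XceiverClientManager manager,
      Pipeline pipeline, boolean force) throws IOException {
    XceiverClientSpi client = manager.acquireClient(pipeline);
    try {
      // A fresh trace ID lets the request be correlated in datanode logs.
      ContainerProtocolCalls.deleteContainer(client, force,
          UUID.randomUUID().toString());
    } finally {
      manager.releaseClient(client);
    }
  }

  private DirectDeleteExample() {
  }
}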

File: StorageContainerLocationProtocol.proto

@@ -84,6 +84,14 @@ message ContainerResponseProto {
   optional string errorMessage = 3;
 }

+message GetContainerRequestProto {
+  required string containerName = 1;
+}
+
+message GetContainerResponseProto {
+  required hadoop.hdfs.ozone.Pipeline pipeline = 1;
+}
+
 // SCM Block protocol
 /**
  * keys - batch of block keys to find
@@ -146,10 +154,14 @@ service StorageContainerLocationProtocolService {
       returns(GetStorageContainerLocationsResponseProto);

   /**
-    Creates a container entry in SCM.
+   * Creates a container entry in SCM.
    */
   rpc allocateContainer(ContainerRequestProto) returns (ContainerResponseProto);

+  /**
+   * Returns the pipeline for a given container.
+   */
+  rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
+
   /**
    * Find the set of nodes that currently host the block, as

File: StorageManager.java

@@ -224,7 +224,7 @@ public class StorageManager {
       for (String containerID : volume.getContainerIDsList()) {
         try {
           Pipeline pipeline = storageClient.getContainer(containerID);
-          storageClient.deleteContainer(pipeline);
+          storageClient.deleteContainer(pipeline, force);
         } catch (IOException e) {
           LOGGER.error("Error deleting container Container:{} error:{}",
               containerID, e);

File: DataNode.java

@@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
 import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -1592,7 +1593,12 @@ public class DataNode extends ReconfigurableBase
     }
     registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
   }

+  @VisibleForTesting
+  public OzoneContainer getOzoneContainerManager() {
+    return this.datanodeStateMachine.getContainer();
+  }
+
   /**
    * After the block pool has contacted the NN, registers that block pool
    * with the secret manager, updating it with the secrets provided by the NN.

File: OzoneContainer.java

@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;

+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -193,4 +194,9 @@ public class OzoneContainer {
   public List<ContainerData> getContainerReports() throws IOException {
     return this.manager.getContainerReports();
   }
+
+  @VisibleForTesting
+  public ContainerManager getContainerManager() {
+    return this.manager;
+  }
 }

File: StorageContainerLocationProtocolServerSideTranslatorPB.java

@@ -54,6 +54,10 @@ import org.apache.hadoop.ozone.protocol.proto
     .StorageContainerLocationProtocolProtos.GetScmBlockLocationsRequestProto;
 import org.apache.hadoop.ozone.protocol.proto
     .StorageContainerLocationProtocolProtos.GetScmBlockLocationsResponseProto;
+import org.apache.hadoop.ozone.protocol.proto
+    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
+import org.apache.hadoop.ozone.protocol.proto
+    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB;
@@ -130,6 +134,20 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
     }
   }

+  @Override
+  public GetContainerResponseProto getContainer(
+      RpcController controller, GetContainerRequestProto request)
+      throws ServiceException {
+    try {
+      Pipeline pipeline = impl.getContainer(request.getContainerName());
+      return GetContainerResponseProto.newBuilder()
+          .setPipeline(pipeline.getProtobufMessage())
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public GetScmBlockLocationsResponseProto getScmBlockLocations(
       RpcController controller, GetScmBlockLocationsRequestProto req)

File: StorageContainerManager.java

@@ -365,8 +365,11 @@ public class StorageContainerManager
         ScmClient.ReplicationFactor.ONE);
   }

-  @VisibleForTesting
-  Pipeline getContainer(String containerName) throws IOException {
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Pipeline getContainer(String containerName) throws IOException {
     return scmContainerManager.getContainer(containerName);
   }

File: OzoneCommandHandler.java

@@ -19,10 +19,9 @@ package org.apache.hadoop.ozone.scm.cli;

 import org.apache.commons.cli.CommandLine;
 import org.apache.hadoop.scm.client.ScmClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 import java.io.IOException;
+import java.io.PrintStream;

 /**
  * The abstract class of all SCM CLI commands.
@@ -30,8 +29,8 @@ import java.io.IOException;
 public abstract class OzoneCommandHandler {

   private ScmClient scmClient;
-  protected static final Logger LOG =
-      LoggerFactory.getLogger(OzoneCommandHandler.class);
+  protected PrintStream out = System.out;
+  protected PrintStream err = System.err;

   /**
    * Constructs a handler object.
@@ -44,6 +43,26 @@ public abstract class OzoneCommandHandler {
     return scmClient;
   }

+  /**
+   * Sets customized output stream to redirect the stdout to somewhere else.
+   * @param out
+   */
+  public void setOut(PrintStream out) {
+    this.out = out;
+  }
+
+  /**
+   * Sets customized error stream to redirect the stderr to somewhere else.
+   * @param err
+   */
+  public void setErr(PrintStream err) {
+    this.err = err;
+  }
+
+  public void logOut(String msg, String ... variable) {
+    this.out.println(String.format(msg, variable));
+  }
+
   /**
    * Executes the Client command.
    *
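Design note and sketch (not part of the patch): the handlers now write user-facing output through injectable PrintStreams instead of a static SLF4J logger, which is what lets TestSCMCli's runCommandAndGetOutput below capture and assert on exact command output. A subclass only calls logOut (String.format semantics) and the stream wiring done by SCMCLI and ContainerCommandHandler handles redirection. The PingHandler class here is hypothetical.

import java.io.IOException;

import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.scm.client.ScmClient;

public class PingHandler extends OzoneCommandHandler {

  public PingHandler(ScmClient scmClient) {
    super(scmClient);
  }

  @Override
  public void execute(CommandLine cmd) throws IOException {
    // Goes to System.out by default, or to whatever stream setOut() injected.
    logOut("Ping handled for client %s.", getScmClient().toString());
  }

  @Override
  public void displayHelp() {
    out.println("usage: hdfs scm -ping");
  }
}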

File: SCMCLI.java

@@ -38,8 +38,6 @@
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 import java.io.IOException;
 import java.io.PrintStream;
@@ -63,8 +61,6 @@ import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
  */
 public class SCMCLI extends OzoneBaseCLI {

-  private static final Logger LOG = LoggerFactory.getLogger(SCMCLI.class);
-
   public static final String HELP_OP = "help";
   public static final int CMD_WIDTH = 80;
@@ -203,7 +199,7 @@ public class SCMCLI extends OzoneBaseCLI {
       BasicParser parser = new BasicParser();
       return parser.parse(opts, argv);
     } catch (ParseException ex) {
-      LOG.error(ex.getMessage());
+      err.println(ex.getMessage());
     }
     return null;
   }
@@ -216,6 +212,7 @@ public class SCMCLI extends OzoneBaseCLI {
     if (cmd.hasOption(CONTAINER_CMD)) {
       handler = new ContainerCommandHandler(scmClient);
     }
+
     if (handler == null) {
       if (cmd.hasOption(HELP_OP)) {
         displayHelp();
@@ -226,6 +223,9 @@ public class SCMCLI extends OzoneBaseCLI {
         return UNRECOGNIZED_CMD;
       }
     } else {
+      // Redirect stdout and stderr if necessary.
+      handler.setOut(this.out);
+      handler.setErr(this.err);
       handler.execute(cmd);
       return SUCCESS;
     }

File: ContainerCommandHandler.java

@@ -29,7 +29,10 @@ import java.util.Arrays;

 import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH;
 import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP;
-import static org.apache.hadoop.ozone.scm.cli.container.CreateContainerHandler.CONTAINER_CREATE;
+import static org.apache.hadoop.ozone.scm.cli.container
+    .CreateContainerHandler.CONTAINER_CREATE;
+import static org.apache.hadoop.ozone.scm.cli.container
+    .DeleteContainerHandler.CONTAINER_DELETE;

 /**
  * The handler class of container-specific commands, e.g. createContainer.
@@ -52,10 +55,15 @@ public class ContainerCommandHandler extends OzoneCommandHandler {
     OzoneCommandHandler handler = null;
     if (cmd.hasOption(CONTAINER_CREATE)) {
       handler = new CreateContainerHandler(getScmClient());
+    } else if (cmd.hasOption(CONTAINER_DELETE)) {
+      handler = new DeleteContainerHandler(getScmClient());
     }

     // execute the sub command, throw exception if no sub command found
     // unless -help option is given.
     if (handler != null) {
+      handler.setOut(this.out);
+      handler.setErr(this.err);
       handler.execute(cmd);
     } else {
       displayHelp();
@@ -79,7 +87,11 @@ public class ContainerCommandHandler extends OzoneCommandHandler {
   private static void addCommandsOption(Options options) {
     Option createContainer =
         new Option(CONTAINER_CREATE, false, "Create container");
+    Option deleteContainer =
+        new Option(CONTAINER_DELETE, true, "Delete container");
+
     options.addOption(createContainer);
+    options.addOption(deleteContainer);
     // TODO : add other options such as delete, close etc.
   }
@@ -87,6 +99,7 @@ public class ContainerCommandHandler extends OzoneCommandHandler {
     addCommandsOption(options);
     // for create container options.
     CreateContainerHandler.addOptions(options);
+    DeleteContainerHandler.addOptions(options);
     // TODO : add other options such as delete, close etc.
   }
 }

File: CreateContainerHandler.java

@@ -56,9 +56,10 @@ public class CreateContainerHandler extends OzoneCommandHandler {
       }
     }
     String pipelineID = cmd.getOptionValue(PIPELINE_ID);
-    LOG.info("Create container : {}", pipelineID);
+
+    logOut("Creating container : %s.", pipelineID);
     getScmClient().createContainer(pipelineID);
-    LOG.debug("Container creation returned");
+    logOut("Container created.");
   }

   @Override

File: DeleteContainerHandler.java (new file)

@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.scm.cli.container;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.scm.client.ScmClient;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH;
+
+/**
+ * This is the handler that process delete container command.
+ */
+public class DeleteContainerHandler extends OzoneCommandHandler {
+
+  protected static final String CONTAINER_DELETE = "del";
+  protected static final String OPT_FORCE = "f";
+
+  public DeleteContainerHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    Preconditions.checkArgument(cmd.hasOption(CONTAINER_DELETE),
+        "Expecting command del");
+    String containerName = cmd.getOptionValue(CONTAINER_DELETE);
+
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    if (pipeline == null) {
+      throw new IOException("Cannot delete an non-exist container "
+          + containerName);
+    }
+
+    logOut("Deleting container : %s.", containerName);
+    getScmClient().deleteContainer(pipeline, cmd.hasOption(OPT_FORCE));
+    logOut("Container %s deleted.", containerName);
+  }
+
+  @Override public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -del <option>",
+        "where <option> is", options, "");
+  }
+
+  public static void addOptions(Options options) {
+    Option forceOpt = new Option(OPT_FORCE,
+        false,
+        "forcibly delete a container");
+    options.addOption(forceOpt);
+  }
+}
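Invocation sketch (not part of the patch): from a shell the new command is roughly "hdfs scm -container -del <containerName> [-f]", matching the help text above; programmatically it can be driven the same way TestSCMCli's runCommandAndGetOutput does below. The class and helper names here are illustrative only.

import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.scm.cli.SCMCLI;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.client.ContainerOperationClient;
import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;

public final class DeleteContainerCliExample {

  // Hypothetical helper, not part of this commit.
  static int deleteContainer(
      StorageContainerLocationProtocolClientSideTranslatorPB locationClient,
      OzoneConfiguration conf, String containerName, boolean force)
      throws Exception {
    ScmClient client = new ContainerOperationClient(
        locationClient, new XceiverClientManager(conf));
    SCMCLI cli = new SCMCLI(client, System.out, System.err);
    String[] args = force
        ? new String[] {"-container", "-del", containerName, "-f"}
        : new String[] {"-container", "-del", containerName};
    // Compare the return value with ResultCode.SUCCESS / EXECUTION_ERROR.
    return cli.run(args);
  }

  private DeleteContainerCliExample() {
  }
}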

File: MockStorageClient.java

@@ -55,7 +55,8 @@ public class MockStorageClient implements ScmClient {
    * @throws IOException
    */
   @Override
-  public void deleteContainer(Pipeline pipeline) throws IOException {
+  public void deleteContainer(Pipeline pipeline, boolean force)
+      throws IOException {

   }

File: TestSCMCli.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.ozone.scm;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.scm.cli.ResultCode;
 import org.apache.hadoop.ozone.scm.cli.SCMCLI;
 import org.apache.hadoop.scm.XceiverClientManager;
@@ -27,7 +30,7 @@ import org.apache.hadoop.scm.client.ContainerOperationClient;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -52,6 +55,7 @@ public class TestSCMCli {
       storageContainerLocationClient;

   private static StorageContainerManager scm;
+  private static ContainerManager containerManager;

   private static ByteArrayOutputStream outContent;
   private static PrintStream outStream;
@@ -73,10 +77,29 @@ public class TestSCMCli {
     errStream = new PrintStream(errContent);
     cli = new SCMCLI(client, outStream, errStream);
     scm = cluster.getStorageContainerManager();
+    containerManager = cluster.getDataNodes().get(0)
+        .getOzoneContainerManager().getContainerManager();
   }

-  @After
-  public void shutdown() throws InterruptedException {
+  private int runCommandAndGetOutput(String[] cmd,
+      ByteArrayOutputStream out,
+      ByteArrayOutputStream err) throws Exception {
+    PrintStream cmdOutStream = System.out;
+    PrintStream cmdErrStream = System.err;
+    if(out != null) {
+      cmdOutStream = new PrintStream(out);
+    }
+    if (err != null) {
+      cmdErrStream = new PrintStream(err);
+    }
+    ScmClient client = new ContainerOperationClient(
+        storageContainerLocationClient, new XceiverClientManager(conf));
+    SCMCLI scmCLI = new SCMCLI(client, cmdOutStream, cmdErrStream);
+    return scmCLI.run(cmd);
+  }
+
+  @AfterClass
+  public static void shutdown() throws InterruptedException {
     IOUtils.cleanup(null, storageContainerLocationClient, cluster);
   }
@@ -98,6 +121,60 @@ public class TestSCMCli {
     assertEquals(containerName, container.getContainerName());
   }

+  @Test
+  public void testDeleteContainer() throws Exception {
+    final String cname1 = "cname1";
+    final String cname2 = "cname2";
+
+    // ****************************************
+    // 1. Test to delete a non-empty container.
+    // ****************************************
+    // Create an non-empty container
+    Pipeline pipeline1 = scm.allocateContainer(cname1);
+    ContainerData data1 = new ContainerData(cname1);
+    containerManager.createContainer(pipeline1, data1);
+    ContainerData cdata = containerManager.readContainer(cname1);
+    KeyUtils.getDB(cdata, conf).put(cname1.getBytes(),
+        "someKey".getBytes());
+
+    // Gracefully delete a container should fail because it is not empty.
+    String[] del1 = {"-container", "-del", cname1};
+    ByteArrayOutputStream testErr1 = new ByteArrayOutputStream();
+    int exitCode1 = runCommandAndGetOutput(del1, null, testErr1);
+    assertEquals(ResultCode.EXECUTION_ERROR, exitCode1);
+    assertTrue(testErr1.toString()
+        .contains("Container cannot be deleted because it is not empty."));
+
+    // Delete should fail when attempts to delete an open container.
+    // Even with the force tag.
+    String[] del2 = {"-container", "-del", cname1, "-f"};
+    ByteArrayOutputStream testErr2 = new ByteArrayOutputStream();
+    int exitCode2 = runCommandAndGetOutput(del2, null, testErr2);
+    assertEquals(ResultCode.EXECUTION_ERROR, exitCode2);
+    assertTrue(testErr2.toString()
+        .contains("Attempting to force delete an open container."));
+
+    // Close the container and try force delete again.
+    containerManager.closeContainer(cname1);
+    int exitCode3 = runCommandAndGetOutput(del2, null, null);
+    assertEquals(ResultCode.SUCCESS, exitCode3);
+
+    // ****************************************
+    // 2. Test to delete an empty container.
+    // ****************************************
+    // Create an empty container
+    Pipeline pipeline2 = scm.allocateContainer(cname2);
+    ContainerData data2 = new ContainerData(cname2);
+    containerManager.createContainer(pipeline2, data2);
+
+    // Successfully delete an empty container.
+    String[] del3 = {"-container", "-del", cname2};
+    int exitCode4 = runCommandAndGetOutput(del3, null, null);
+    assertEquals(ResultCode.SUCCESS, exitCode4);
+  }
+
   @Test
   public void testNonExistCommand() throws Exception {
     PrintStream init = System.out;
@@ -139,7 +216,8 @@ public class TestSCMCli {
     String expected1 =
         "usage: hdfs scm -container <commands> <options>\n" +
         "where <commands> can be one of the following\n" +
-        " -create Create container\n";
+        " -create Create container\n" +
+        " -del <arg> Delete container\n";
     assertEquals(expected1, testContent.toString());
     testContent.reset();