From 1087ce4cd2034a69f6b573f91b78bde0cafe59a2 Mon Sep 17 00:00:00 2001
From: Weiwei Yang
Date: Tue, 12 Sep 2017 11:12:08 +0800
Subject: [PATCH] HDFS-11676. Ozone: SCM CLI: Implement close container
 command. Contributed by Chen Liang.

---
 .../scm/client/ContainerOperationClient.java  | 43 ++++++++++
 .../apache/hadoop/scm/client/ScmClient.java   |  8 ++
 .../StorageContainerLocationProtocol.java     |  8 ++
 ...ocationProtocolClientSideTranslatorPB.java | 16 ++++
 .../scm/storage/ContainerProtocolCalls.java   | 23 +++++
 .../StorageContainerLocationProtocol.proto    | 12 +++
 ...ocationProtocolServerSideTranslatorPB.java | 14 ++++
 .../ozone/scm/StorageContainerManager.java    | 11 +++
 .../ozone/scm/block/BlockManagerImpl.java     |  4 +-
 .../cli/container/CloseContainerHandler.java  | 84 +++++++++++++++++++
 .../container/ContainerCommandHandler.java    | 11 ++-
 .../ozone/scm/container/ContainerMapping.java | 16 ++++
 .../hadoop/ozone/scm/container/Mapping.java   |  8 ++
 .../ozone/scm/exceptions/SCMException.java    |  5 +-
 .../hadoop/cblock/util/MockStorageClient.java |  6 ++
 .../apache/hadoop/ozone/scm/TestSCMCli.java   | 38 +++++++--
 16 files changed, 295 insertions(+), 12 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CloseContainerHandler.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
index 3c1a2669005..eabd4503ede 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java
@@ -255,6 +255,49 @@ public Pipeline getContainer(String containerId) throws
     return storageContainerLocationClient.getContainer(containerId);
   }
 
+  /**
+   * Close a container.
+   *
+   * @param pipeline the container to be closed.
+   * @throws IOException
+   */
+  @Override
+  public void closeContainer(Pipeline pipeline) throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      LOG.debug("Close container {}", pipeline);
+      /*
+      TODO: two possible orders here, revisit this later:
+        1. close on SCM first, then on the datanode
+        2. close on the datanode first, then on SCM
+
+      With 1: if the client fails after closing on SCM, there is a
+      container that SCM thinks is closed but is actually open. SCM will no
+      longer allocate blocks to it, which is fine, but SCM may later try to
+      replicate this "closed" container, which I'm not sure is safe.
+
+      With 2: if the client fails after closing on the datanode, there is a
+      container that SCM thinks is open but is actually closed. SCM will
+      still try to allocate blocks to it, which will fail when the write is
+      actually attempted. No more data can be written, but the correctness
+      and consistency of the existing data are maintained.
+
+      For now, take the #2 way.
+       */
+      // Actually close the container on Datanode
+      client = xceiverClientManager.acquireClient(pipeline);
+      String traceID = UUID.randomUUID().toString();
+      ContainerProtocolCalls.closeContainer(client, traceID);
+      // Notify SCM to close the container
+      String containerId = pipeline.getContainerName();
+      storageContainerLocationClient.closeContainer(containerId);
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
   /**
    * Get the the current usage information.
    * @param pipeline - Pipeline
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
index 2c2d244250d..6dbc1e7d1dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java
@@ -54,6 +54,14 @@ public interface ScmClient {
    */
   Pipeline getContainer(String containerId) throws IOException;
 
+  /**
+   * Close a container.
+   *
+   * @param pipeline the container to be closed.
+   * @throws IOException
+   */
+  void closeContainer(Pipeline pipeline) throws IOException;
+
   /**
    * Deletes an existing container.
    * @param pipeline - Pipeline that represents the container.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
index 94134d63403..95dccc073f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
@@ -108,4 +108,12 @@ void notifyObjectCreationStage(
   Pipeline createReplicationPipeline(OzoneProtos.ReplicationType type,
       OzoneProtos.ReplicationFactor factor, OzoneProtos.NodePool nodePool)
       throws IOException;
+
+  /**
+   * Close a container.
+   *
+   * @param containerName the name of the container to close.
+   * @throws IOException
+   */
+  void closeContainer(String containerName) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 8dc1c6cb633..3705e31ab7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.CloseContainerRequestProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
@@ -269,6 +270,21 @@ public Pipeline createReplicationPipeline(OzoneProtos.ReplicationType
     }
   }
 
+  @Override
+  public void closeContainer(String containerName) throws IOException {
+    Preconditions.checkState(!Strings.isNullOrEmpty(containerName),
+        "Container name cannot be null or empty");
+    CloseContainerRequestProto request = CloseContainerRequestProto
+        .newBuilder()
+        .setContainerName(containerName)
+        .build();
+    try {
+      rpcProxy.closeContainer(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public Object getUnderlyingProxyObject() {
     return rpcProxy;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java
index 3ec13b1c989..48e335f07b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java
@@ -279,6 +279,29 @@ public static void deleteContainer(XceiverClientSpi client,
     validateContainerResponse(response);
   }
 
+  /**
+   * Close a container.
+   *
+   * @param client
+   * @param traceID
+   * @throws IOException
+   */
+  public static void closeContainer(XceiverClientSpi client, String traceID)
+      throws IOException {
+    ContainerProtos.CloseContainerRequestProto.Builder closeRequest =
+        ContainerProtos.CloseContainerRequestProto.newBuilder();
+    closeRequest.setPipeline(client.getPipeline().getProtobufMessage());
+
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(Type.CloseContainer);
+    request.setCloseContainer(closeRequest);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+  }
+
   /**
    * readContainer call that gets meta data from an existing container.
    *
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
index 550f6a6b70d..da19b815c57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
@@ -64,6 +64,14 @@ message GetContainerResponseProto {
   required hadoop.hdfs.ozone.Pipeline pipeline = 1;
 }
 
+message CloseContainerRequestProto {
+  required string containerName = 1;
+}
+
+message CloseContainerResponseProto {
+
+}
+
 message ListContainerRequestProto {
   required uint32 count = 1;
   optional string startName = 2;
@@ -183,6 +191,10 @@ service StorageContainerLocationProtocolService {
   */
   rpc notifyObjectCreationStage(NotifyObjectCreationStageRequestProto) returns (NotifyObjectCreationStageResponseProto);
 
+  /**
+   * Close a container.
+   */
+  rpc closeContainer(CloseContainerRequestProto) returns (CloseContainerResponseProto);
   /*
   *  Apis that Manage Pipelines.
  *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
index f12aafbb13c..fce740c64e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -31,6 +31,8 @@
     .StorageContainerLocationProtocolProtos;
 import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
 
+import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.CloseContainerRequestProto;
+import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.CloseContainerResponseProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
@@ -173,6 +175,18 @@ public NotifyObjectCreationStageResponseProto notifyObjectCreationStage(
     }
   }
 
+  @Override
+  public CloseContainerResponseProto closeContainer(
+      RpcController controller, CloseContainerRequestProto request)
+      throws ServiceException {
+    try {
+      impl.closeContainer(request.getContainerName());
+      return CloseContainerResponseProto.newBuilder().build();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
+
   @Override
   public PipelineResponseProto allocatePipeline(
       RpcController controller, PipelineRequestProto request)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
index 6677b65c78c..5f476e44d6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
@@ -398,6 +398,11 @@ public Pipeline getContainer(String containerName) throws IOException {
     return scmContainerManager.getContainer(containerName).getPipeline();
   }
 
+  @VisibleForTesting
+  ContainerInfo getContainerInfo(String containerName) throws IOException {
+    return scmContainerManager.getContainer(containerName);
+  }
+
   /**
    * {@inheritDoc}
    */
@@ -487,6 +492,12 @@ public Pipeline createReplicationPipeline(
     return null;
   }
 
+  @Override
+  public void closeContainer(String containerName) throws IOException {
+    checkAdminAccess();
+    scmContainerManager.closeContainer(containerName);
+  }
+
   /**
    * Queries a list of Node that match a set of statuses.
    *
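
For reference, a minimal usage sketch (not part of the patch) of how the client-side API added above can be driven end to end; it mirrors what the new CloseContainerHandler further down does. The ScmClient instance is assumed to be an already configured ContainerOperationClient, and the container name is hypothetical.

import java.io.IOException;

import org.apache.hadoop.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

public final class CloseContainerSketch {
  public static void closeByName(ScmClient scmClient, String containerName)
      throws IOException {
    // Resolve the container's pipeline first, as the CLI handler does.
    Pipeline pipeline = scmClient.getContainer(containerName);
    if (pipeline == null) {
      throw new IOException("No such container: " + containerName);
    }
    // closeContainer closes the container on the datanodes first and then
    // notifies SCM (order #2 discussed in the TODO above).
    scmClient.closeContainer(pipeline);
  }
}
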

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java
index 1a2dc149e66..472dc7d08d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java
@@ -65,7 +65,7 @@
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes.
     FAILED_TO_FIND_CONTAINER;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes.
-    FAILED_TO_FIND_CONTAINER_WITH_SAPCE;
+    FAILED_TO_FIND_CONTAINER_WITH_SPACE;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes.
     FAILED_TO_FIND_BLOCK;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes.
@@ -384,7 +384,7 @@ public AllocatedBlock allocateBlock(final long size) throws IOException {
       // now we should have some candidates in ALLOCATE state
       if (candidates.size() == 0) {
         throw new SCMException("Fail to find any container to allocate block "
-            + "of size " + size + ".", FAILED_TO_FIND_CONTAINER_WITH_SAPCE);
+            + "of size " + size + ".", FAILED_TO_FIND_CONTAINER_WITH_SPACE);
       }
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CloseContainerHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CloseContainerHandler.java
new file mode 100644
index 00000000000..7a523053734
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CloseContainerHandler.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.scm.cli.container;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.scm.client.ScmClient;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH;
+import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP;
+
+/**
+ * The handler of close container command.
+ */
+public class CloseContainerHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_CLOSE = "close";
+  public static final String OPT_CONTAINER_NAME = "c";
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    if (!cmd.hasOption(CONTAINER_CLOSE)) {
+      throw new IOException("Expecting container close");
+    }
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+      displayHelp();
+      if (!cmd.hasOption(HELP_OP)) {
+        throw new IOException("Expecting container name");
+      } else {
+        return;
+      }
+    }
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    if (pipeline == null) {
+      throw new IOException("Cannot close a non-existent container "
+          + containerName);
+    }
+    logOut("Closing container : %s.", containerName);
+    getScmClient().closeContainer(pipeline);
+    logOut("Container closed.");
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -close