HDFS-13415. Ozone: Remove cblock code from HDFS-7240. Contributed by Elek, Marton.

Mukul Kumar Singh 2018-04-11 18:42:16 +05:30 committed by Owen O'Malley
parent aae3ba24ca
commit ec6c8742e5
79 changed files with 1 addition and 10238 deletions

View File

@@ -52,7 +52,6 @@
<exclude>**/SecurityAuth.audit*</exclude>
<exclude>hadoop-ozone/**</exclude>
<exclude>hadoop-hdds/**</exclude>
<exclude>hadoop-cblock/**</exclude>
</excludes>
</fileSet>
</fileSets>

View File

@@ -1,61 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
<version>3.2.0-SNAPSHOT</version>
<relativePath>../hadoop-project-dist</relativePath>
</parent>
<artifactId>hadoop-cblock</artifactId>
<version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop Cblock parent project</description>
<name>Apache Hadoop Cblock</name>
<packaging>pom</packaging>
<modules>
<module>server</module>
<module>tools</module>
</modules>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<configuration>
<excludeFilterFile combine.self="override"></excludeFilterFile>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,21 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.cblock.protocol.proto"/>
</Match>
</FindBugsFilter>

View File

@@ -1,159 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock</artifactId>
<version>3.2.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-cblock-server</artifactId>
<version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop CBlock Server</description>
<name>Apache Hadoop CBlock Server</name>
<packaging>jar</packaging>
<properties>
<hadoop.component>cblock</hadoop.component>
<is.hadoop.component>true</is.hadoop.component>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-server-framework</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-integration-test</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.jscsi</groupId>
<artifactId>target</artifactId>
<version>2.6.0</version>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.kubernetes</groupId>
<artifactId>client-java</artifactId>
<version>1.0.0-beta1</version>
<exclusions>
<exclusion>
<groupId>io.swagger</groupId>
<artifactId>swagger-annotations</artifactId>
</exclusion>
<exclusion>
<groupId>com.github.stefanbirkner</groupId>
<artifactId>system-rules</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>compile-protoc</id>
<goals>
<goal>protoc</goal>
</goals>
<configuration>
<protocVersion>${protobuf.version}</protocVersion>
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>
${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
</param>
<param>
${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
</param>
<param>
${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
</param>
<param>
${basedir}/../../hadoop-hdds/common/src/main/proto/
</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>CBlockClientServerProtocol.proto</include>
<include>CBlockServiceProtocol.proto</include>
</includes>
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<configuration>
<excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,222 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import static java.lang.Thread.NORM_PRIORITY;
/**
* This class contains constants for configuration keys used in CBlock.
*/
public final class CBlockConfigKeys {
public static final String DFS_CBLOCK_SERVICERPC_ADDRESS_KEY =
"dfs.cblock.servicerpc-address";
public static final int DFS_CBLOCK_SERVICERPC_PORT_DEFAULT =
9810;
public static final String DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT =
"0.0.0.0";
public static final String DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY =
"dfs.cblock.jscsi-address";
// The port on the CBlockManager node that the jSCSI daemon talks to.
public static final String DFS_CBLOCK_JSCSI_PORT_KEY =
"dfs.cblock.jscsi.port";
public static final int DFS_CBLOCK_JSCSI_PORT_DEFAULT =
9811;
public static final String DFS_CBLOCK_SERVICERPC_BIND_HOST_KEY =
"dfs.cblock.service.rpc-bind-host";
public static final String DFS_CBLOCK_JSCSIRPC_BIND_HOST_KEY =
"dfs.cblock.jscsi.rpc-bind-host";
// default block size is 4KB
public static final int DFS_CBLOCK_SERVICE_BLOCK_SIZE_DEFAULT =
4096;
public static final String DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY =
"dfs.cblock.service.handler.count";
public static final int DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT = 10;
public static final String DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY =
"dfs.cblock.service.leveldb.path";
// TODO: find a better default location than /tmp.
public static final String DFS_CBLOCK_SERVICE_LEVELDB_PATH_DEFAULT =
"/tmp/cblock_levelDB.dat";
public static final String DFS_CBLOCK_DISK_CACHE_PATH_KEY =
"dfs.cblock.disk.cache.path";
public static final String DFS_CBLOCK_DISK_CACHE_PATH_DEFAULT =
"/tmp/cblockCacheDB";
/**
* Setting this flag to true makes the block layer compute a sha256 hash of
* the data and log that information along with block ID. This is very
* useful for doing trace based simulation of various workloads. Since it is
* computing a hash for each block this could be expensive, hence default
* is false.
*/
public static final String DFS_CBLOCK_TRACE_IO = "dfs.cblock.trace.io";
public static final boolean DFS_CBLOCK_TRACE_IO_DEFAULT = false;
public static final String DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO =
"dfs.cblock.short.circuit.io";
public static final boolean DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO_DEFAULT =
false;
/**
* Cache queue size in units of 1024 entries; the default of 256 indicates
* 256 * 1024 entries.
*/
public static final String DFS_CBLOCK_CACHE_QUEUE_SIZE_KB =
"dfs.cblock.cache.queue.size.in.kb";
public static final int DFS_CBLOCK_CACHE_QUEUE_SIZE_KB_DEFAULT = 256;
/**
* Minimum Number of threads that cache pool will use for background I/O.
*/
public static final String DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE =
"dfs.cblock.cache.core.min.pool.size";
public static final int DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT = 16;
/**
* Maximum Number of threads that cache pool will use for background I/O.
*/
public static final String DFS_CBLOCK_CACHE_MAX_POOL_SIZE =
"dfs.cblock.cache.max.pool.size";
public static final int DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT = 256;
/**
* Number of seconds to keep the Thread alive when it is idle.
*/
public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE =
"dfs.cblock.cache.keep.alive";
public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT = "60s";
/**
* Priority of cache flusher thread, affecting the relative performance of
* write and read.
*/
public static final String DFS_CBLOCK_CACHE_THREAD_PRIORITY =
"dfs.cblock.cache.thread.priority";
public static final int DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT =
NORM_PRIORITY;
/**
* Block Buffer size in terms of blockID entries, 512 means 512 blockIDs.
*/
public static final String DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE =
"dfs.cblock.cache.block.buffer.size";
public static final int DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT = 512;
public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL =
"dfs.cblock.block.buffer.flush.interval";
public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT =
"60s";
// jscsi server settings
public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY =
"dfs.cblock.jscsi.server.address";
public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT =
"0.0.0.0";
public static final String DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_KEY =
"dfs.cblock.jscsi.cblock.server.address";
public static final String DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_DEFAULT =
"127.0.0.1";
// The SCM address that the CBlock server should talk to.
public static final String DFS_CBLOCK_SCM_IPADDRESS_KEY =
"dfs.cblock.scm.ipaddress";
public static final String DFS_CBLOCK_SCM_IPADDRESS_DEFAULT =
"127.0.0.1";
public static final String DFS_CBLOCK_SCM_PORT_KEY =
"dfs.cblock.scm.port";
public static final int DFS_CBLOCK_SCM_PORT_DEFAULT = 9860;
public static final String DFS_CBLOCK_CONTAINER_SIZE_GB_KEY =
"dfs.cblock.container.size.gb";
public static final int DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT =
5;
// The LevelDB cache file uses an off-heap LevelDB cache of 256 MB by default.
public static final String DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_KEY =
"dfs.cblock.cache.leveldb.cache.size.mb";
public static final int DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_DEFAULT = 256;
/**
* The cache makes a best-effort attempt to write a block to a container.
* At some point we will need to handle the case where we have tried
* 64K times and are still not able to write to the container.
*
* TODO: We will need the cBlock server to allow remapping of the block
* location in case of failures; at that point we should reduce the retry
* count to a more normal number. 64K retries is approximately 18 hours
* of retrying.
*/
public static final String DFS_CBLOCK_CACHE_MAX_RETRY_KEY =
"dfs.cblock.cache.max.retry";
public static final int DFS_CBLOCK_CACHE_MAX_RETRY_DEFAULT =
64 * 1024;
/**
* Cblock CLI configs.
*/
public static final String DFS_CBLOCK_MANAGER_POOL_SIZE =
"dfs.cblock.manager.pool.size";
public static final int DFS_CBLOCK_MANAGER_POOL_SIZE_DEFAULT = 16;
/**
* Currently the largest supported volume is about 8 TB, which might take
* more than 20 seconds to finish creating containers; the timeout is
* therefore set well above that (default 300s, below).
*/
public static final String DFS_CBLOCK_RPC_TIMEOUT =
"dfs.cblock.rpc.timeout";
public static final String DFS_CBLOCK_RPC_TIMEOUT_DEFAULT = "300s";
public static final String DFS_CBLOCK_ISCSI_ADVERTISED_IP =
"dfs.cblock.iscsi.advertised.ip";
public static final String DFS_CBLOCK_ISCSI_ADVERTISED_PORT =
"dfs.cblock.iscsi.advertised.port";
public static final int DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT = 3260;
public static final String
DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED
= "dfs.cblock.kubernetes.dynamic-provisioner.enabled";
public static final boolean
DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT = false;
public static final String
DFS_CBLOCK_KUBERNETES_CBLOCK_USER =
"dfs.cblock.kubernetes.cblock-user";
public static final String
DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT =
"iqn.2001-04.org.apache.hadoop";
public static final String
DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY =
"dfs.cblock.kubernetes.configfile";
private CBlockConfigKeys() {
}
}
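
For context, a minimal sketch of how such keys are read, assuming an OzoneConfiguration instance as used by CBlockManager below; the wrapper class is illustrative, not part of the removed code:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class CBlockConfigExample {
public static void main(String[] args) {
// Standard Hadoop Configuration getter: key string plus compile-time default.
OzoneConfiguration conf = new OzoneConfiguration();
int jscsiPort = conf.getInt(
CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_KEY, // "dfs.cblock.jscsi.port"
CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_DEFAULT); // 9811 unless overridden
System.out.println("jSCSI port: " + jscsiPort);
}
}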

View File

@@ -1,426 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.cblock.kubernetes.DynamicProvisioner;
import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.CBlockClientProtocol;
import org.apache.hadoop.cblock.proto.CBlockServiceProtocol;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.protocol.proto
.CBlockClientServerProtocolProtos;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB;
import org.apache.hadoop.cblock.protocolPB
.CBlockClientServerProtocolServerSideTranslatorPB;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.cblock.protocolPB
.CBlockServiceProtocolServerSideTranslatorPB;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.utils.LevelDBStore;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServerRpcAddr;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr;
import static org.apache.hadoop.hdds.server.ServerUtils
.updateRPCListenAddress;
import org.iq80.leveldb.DBIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CONTAINER_SIZE_GB_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSIRPC_BIND_HOST_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SCM_IPADDRESS_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SCM_IPADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SCM_PORT_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SCM_PORT_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_BIND_HOST_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICE_LEVELDB_PATH_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT;
/**
* The main entry point of CBlock operations; ALL CBlock operations
* go through this class. Note that:
*
* volume operations (create/delete/info) flow as:
* client -> CBlockManager -> StorageManager -> CBlock client
*
* IO operations (put/get block) flow as:
* client -> CBlock client -> container
*
*/
public class CBlockManager implements CBlockServiceProtocol,
CBlockClientProtocol {
private static final Logger LOG =
LoggerFactory.getLogger(CBlockManager.class);
private final RPC.Server cblockService;
private final RPC.Server cblockServer;
private final StorageManager storageManager;
private final LevelDBStore levelDBStore;
private final String dbPath;
private final DynamicProvisioner kubernetesDynamicProvisioner;
private Charset encoding = Charset.forName("UTF-8");
public CBlockManager(OzoneConfiguration conf,
ScmClient storageClient) throws IOException {
// Fix the cBlockManagerId generation code here. Should support a
// cBlockManager --init command which will generate a cBlockManagerId and
// persist it locally.
storageManager =
new StorageManager(storageClient, conf, "CBLOCK");
dbPath = conf.getTrimmed(DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY,
DFS_CBLOCK_SERVICE_LEVELDB_PATH_DEFAULT);
levelDBStore = new LevelDBStore(new File(dbPath), true);
LOG.info("Try to load exising volume information");
readFromPersistentStore();
RPC.setProtocolEngine(conf, CBlockServiceProtocolPB.class,
ProtobufRpcEngine.class);
RPC.setProtocolEngine(conf, CBlockClientServerProtocolPB.class,
ProtobufRpcEngine.class);
// start service for client command-to-cblock server service
InetSocketAddress serviceRpcAddr =
getCblockServiceRpcAddr(conf);
BlockingService cblockProto =
CBlockServiceProtocolProtos
.CBlockServiceProtocolService
.newReflectiveBlockingService(
new CBlockServiceProtocolServerSideTranslatorPB(this)
);
cblockService = startRpcServer(conf, CBlockServiceProtocolPB.class,
cblockProto, serviceRpcAddr,
DFS_CBLOCK_SERVICERPC_BIND_HOST_KEY,
DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY,
DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT);
InetSocketAddress cblockServiceRpcAddress =
updateRPCListenAddress(conf,
DFS_CBLOCK_SERVICERPC_ADDRESS_KEY, serviceRpcAddr, cblockService);
LOG.info("CBlock manager listening for client commands on: {}",
cblockServiceRpcAddress);
// now start service for cblock client-to-cblock server communication
InetSocketAddress serverRpcAddr =
getCblockServerRpcAddr(conf);
BlockingService serverProto =
CBlockClientServerProtocolProtos
.CBlockClientServerProtocolService
.newReflectiveBlockingService(
new CBlockClientServerProtocolServerSideTranslatorPB(this)
);
cblockServer = startRpcServer(
conf, CBlockClientServerProtocolPB.class,
serverProto, serverRpcAddr,
DFS_CBLOCK_JSCSIRPC_BIND_HOST_KEY,
DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY,
DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT);
InetSocketAddress cblockServerRpcAddress =
updateRPCListenAddress(conf,
DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, serverRpcAddr, cblockServer);
LOG.info("CBlock server listening for client commands on: {}",
cblockServerRpcAddress);
if (conf.getBoolean(DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED,
DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT)) {
kubernetesDynamicProvisioner =
new DynamicProvisioner(conf, storageManager);
kubernetesDynamicProvisioner.init();
} else {
kubernetesDynamicProvisioner = null;
}
}
public void start() {
cblockService.start();
cblockServer.start();
if (kubernetesDynamicProvisioner != null) {
kubernetesDynamicProvisioner.start();
}
LOG.info("CBlock manager started!");
}
public void stop() {
cblockService.stop();
cblockServer.stop();
if (kubernetesDynamicProvisioner != null) {
kubernetesDynamicProvisioner.stop();
}
}
public void join() {
try {
cblockService.join();
cblockServer.join();
} catch (InterruptedException e) {
LOG.error("Interrupted during join");
Thread.currentThread().interrupt();
}
}
/**
* Starts an RPC server, if configured.
*
* @param conf configuration
* @param protocol RPC protocol provided by RPC server
* @param instance RPC protocol implementation instance
* @param addr configured address of RPC server
* @param bindHostKey configuration key for setting explicit bind host. If
* the property is not configured, then the bind host is taken from addr.
* @param handlerCountKey configuration key for RPC server handler count
* @param handlerCountDefault default RPC server handler count if unconfigured
* @return RPC server, or null if addr is null
* @throws IOException if there is an I/O error while creating RPC server
*/
private static RPC.Server startRpcServer(OzoneConfiguration conf,
Class<?> protocol, BlockingService instance,
InetSocketAddress addr, String bindHostKey,
String handlerCountKey, int handlerCountDefault) throws IOException {
if (addr == null) {
return null;
}
String bindHost = conf.getTrimmed(bindHostKey);
if (bindHost == null || bindHost.isEmpty()) {
bindHost = addr.getHostName();
}
int numHandlers = conf.getInt(handlerCountKey, handlerCountDefault);
RPC.Server rpcServer = new RPC.Builder(conf)
.setProtocol(protocol)
.setInstance(instance)
.setBindAddress(bindHost)
.setPort(addr.getPort())
.setNumHandlers(numHandlers)
.setVerbose(false)
.setSecretManager(null)
.build();
return rpcServer;
}
@Override
public synchronized MountVolumeResponse mountVolume(
String userName, String volumeName) throws IOException {
return storageManager.isVolumeValid(userName, volumeName);
}
@Override
public List<VolumeInfo> listVolumes() throws IOException {
return listVolume(null);
}
@Override
public synchronized void createVolume(String userName, String volumeName,
long volumeSize, int blockSize) throws IOException {
LOG.info("Create volume received: userName: {} volumeName: {} " +
"volumeSize: {} blockSize: {}", userName, volumeName,
volumeSize, blockSize);
// It is important to create the in-memory representation of the
// volume first, then write to persistent storage (levelDB),
// so that it is guaranteed that whenever there is an entry in
// levelDB, the volume is allocated (much like an UNDO log).
// TODO: what if creation failed? We allocated containers but lost
// the reference to the volume and all its containers. How do we
// release the containers?
storageManager.createVolume(userName, volumeName, volumeSize, blockSize);
VolumeDescriptor volume = storageManager.getVolume(userName, volumeName);
if (volume == null) {
throw new IOException("Volume creation failed!");
}
String volumeKey = KeyUtil.getVolumeKey(userName, volumeName);
writeToPersistentStore(volumeKey.getBytes(encoding),
volume.toProtobuf().toByteArray());
}
@Override
public synchronized void deleteVolume(String userName,
String volumeName, boolean force) throws IOException {
LOG.info("Delete volume received: volume: {} {} ", volumeName, force);
storageManager.deleteVolume(userName, volumeName, force);
// being here means volume is successfully deleted now
String volumeKey = KeyUtil.getVolumeKey(userName, volumeName);
removeFromPersistentStore(volumeKey.getBytes(encoding));
}
// No need to synchronize the following three methods, since write's and
// remove's callers are synchronized, and read's caller is the constructor:
// no other method call can happen at that time.
@VisibleForTesting
public void writeToPersistentStore(byte[] key, byte[] value) {
levelDBStore.put(key, value);
}
@VisibleForTesting
public void removeFromPersistentStore(byte[] key) {
levelDBStore.delete(key);
}
public void readFromPersistentStore() throws IOException {
try (DBIterator iter = levelDBStore.getIterator()) {
iter.seekToFirst();
while (iter.hasNext()) {
Map.Entry<byte[], byte[]> entry = iter.next();
String volumeKey = new String(entry.getKey(), encoding);
try {
VolumeDescriptor volumeDescriptor =
VolumeDescriptor.fromProtobuf(entry.getValue());
storageManager.addVolume(volumeDescriptor);
} catch (IOException e) {
LOG.error("Loading volume " + volumeKey + " error " + e);
}
}
}
}
@Override
public synchronized VolumeInfo infoVolume(String userName, String volumeName
) throws IOException {
LOG.info("Info volume received: volume: {}", volumeName);
return storageManager.infoVolume(userName, volumeName);
}
@VisibleForTesting
public synchronized List<VolumeDescriptor> getAllVolumes() {
return storageManager.getAllVolume(null);
}
public synchronized List<VolumeDescriptor> getAllVolumes(String userName) {
return storageManager.getAllVolume(userName);
}
public synchronized void close() {
try {
levelDBStore.close();
} catch (IOException e) {
LOG.error("Error when closing levelDB " + e);
}
}
public synchronized void clean() {
try {
levelDBStore.close();
levelDBStore.destroy();
} catch (IOException e) {
LOG.error("Error when deleting levelDB " + e);
}
}
@Override
public synchronized List<VolumeInfo> listVolume(String userName)
throws IOException {
ArrayList<VolumeInfo> response = new ArrayList<>();
List<VolumeDescriptor> allVolumes =
storageManager.getAllVolume(userName);
for (VolumeDescriptor volume : allVolumes) {
VolumeInfo info =
new VolumeInfo(volume.getUserName(), volume.getVolumeName(),
volume.getVolumeSize(), volume.getBlockSize());
response.add(info);
}
return response;
}
public static void main(String[] args) throws Exception {
long version = RPC.getProtocolVersion(
StorageContainerLocationProtocolPB.class);
CblockUtils.activateConfigs();
OzoneConfiguration ozoneConf = new OzoneConfiguration();
String scmAddress = ozoneConf.get(DFS_CBLOCK_SCM_IPADDRESS_KEY,
DFS_CBLOCK_SCM_IPADDRESS_DEFAULT);
int scmPort = ozoneConf.getInt(DFS_CBLOCK_SCM_PORT_KEY,
DFS_CBLOCK_SCM_PORT_DEFAULT);
int containerSizeGB = ozoneConf.getInt(DFS_CBLOCK_CONTAINER_SIZE_GB_KEY,
DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT);
ContainerOperationClient.setContainerSizeB(containerSizeGB * OzoneConsts.GB);
InetSocketAddress address = new InetSocketAddress(scmAddress, scmPort);
ozoneConf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
LOG.info(
"Creating StorageContainerLocationProtocol RPC client with address {}",
address);
RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
ProtobufRpcEngine.class);
StorageContainerLocationProtocolClientSideTranslatorPB client =
new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), ozoneConf,
NetUtils.getDefaultSocketFactory(ozoneConf),
Client.getRpcTimeout(ozoneConf)));
ScmClient storageClient = new ContainerOperationClient(
client, new XceiverClientManager(ozoneConf));
CBlockManager cbm = new CBlockManager(ozoneConf, storageClient);
cbm.start();
cbm.join();
}
}

View File

@@ -1,129 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.cblock;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Optional;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSI_PORT_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_PORT_DEFAULT;
import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
/**
* Generic stateless utility functions for CBlock components.
*/
public class CblockUtils {
private CblockUtils() {
}
/**
* Retrieve the socket address that is used by CBlock Service.
*
* @param conf - configuration to read the CBlock service address from.
* @return Target InetSocketAddress for the CBlock Service endpoint.
*/
public static InetSocketAddress getCblockServiceRpcAddr(Configuration conf) {
final Optional<String> host =
getHostNameFromConfigKeys(conf, DFS_CBLOCK_SERVICERPC_ADDRESS_KEY);
// If no port number is specified then we'll just try the defaultBindPort.
final Optional<Integer> port =
getPortNumberFromConfigKeys(conf, DFS_CBLOCK_SERVICERPC_ADDRESS_KEY);
return NetUtils.createSocketAddr(
host.or(DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT) + ":" + port
.or(DFS_CBLOCK_SERVICERPC_PORT_DEFAULT));
}
/**
* Retrieve the socket address that is used by CBlock Server.
*
* @param conf - configuration to read the CBlock server address from.
* @return Target InetSocketAddress for the CBlock Server endpoint.
*/
public static InetSocketAddress getCblockServerRpcAddr(Configuration conf) {
final Optional<String> host =
getHostNameFromConfigKeys(conf, DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY);
// If no port number is specified then we'll just try the defaultBindPort.
final Optional<Integer> port =
getPortNumberFromConfigKeys(conf, DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY);
return NetUtils.createSocketAddr(
host.or(DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT) + ":" + port
.or(DFS_CBLOCK_JSCSI_PORT_DEFAULT));
}
/**
* Parse a size string with a unit suffix (e.g. 5GB, 100Mi) into bytes.
*
*/
public static long parseSize(String volumeSizeArgs) throws IOException {
long multiplier = 1;
Pattern p = Pattern.compile("([0-9]+)([a-zA-Z]+)");
Matcher m = p.matcher(volumeSizeArgs);
if (!m.find()) {
throw new IOException("Invalid volume size args " + volumeSizeArgs);
}
int size = Integer.parseInt(m.group(1));
String s = m.group(2);
if (s.equalsIgnoreCase("MB") ||
s.equalsIgnoreCase("Mi")) {
multiplier = 1024L * 1024;
} else if (s.equalsIgnoreCase("GB") ||
s.equalsIgnoreCase("Gi")) {
multiplier = 1024L * 1024 * 1024;
} else if (s.equalsIgnoreCase("TB") ||
s.equalsIgnoreCase("Ti")) {
multiplier = 1024L * 1024 * 1024 * 1024;
} else {
throw new IOException("Invalid volume size args " + volumeSizeArgs);
}
return size * multiplier;
}
public static void activateConfigs() {
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
Configuration.addDefaultResource("ozone-default.xml");
Configuration.addDefaultResource("ozone-site.xml");
Configuration.addDefaultResource("cblock-default.xml");
Configuration.addDefaultResource("cblock-site.xml");
}
}
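
A short usage sketch for parseSize; the inputs and the caller are illustrative, not from the removed code:

// Hypothetical caller of CblockUtils.parseSize.
static void parseSizeDemo() throws IOException {
long fiveGb = CblockUtils.parseSize("5GB"); // 5L * 1024 * 1024 * 1024 bytes
long hundredMi = CblockUtils.parseSize("100Mi"); // 100L * 1024 * 1024 bytes
// Only MB/Mi, GB/Gi and TB/Ti suffixes are accepted: "5KB" throws IOException.
}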

View File

@@ -1,135 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.client;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.CBlockServiceProtocol;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* The client-side implementation of CBlockServiceProtocol.
*/
@InterfaceAudience.Private
public final class CBlockServiceProtocolClientSideTranslatorPB
implements CBlockServiceProtocol, ProtocolTranslator, Closeable {
private final CBlockServiceProtocolPB rpcProxy;
public CBlockServiceProtocolClientSideTranslatorPB(
CBlockServiceProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public void createVolume(String userName, String volumeName,
long volumeSize, int blockSize) throws IOException {
CBlockServiceProtocolProtos.CreateVolumeRequestProto.Builder req =
CBlockServiceProtocolProtos.CreateVolumeRequestProto.newBuilder();
req.setUserName(userName);
req.setVolumeName(volumeName);
req.setVolumeSize(volumeSize);
req.setBlockSize(blockSize);
try {
rpcProxy.createVolume(null, req.build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void deleteVolume(String userName, String volumeName, boolean force)
throws IOException {
CBlockServiceProtocolProtos.DeleteVolumeRequestProto.Builder req =
CBlockServiceProtocolProtos.DeleteVolumeRequestProto.newBuilder();
req.setUserName(userName);
req.setVolumeName(volumeName);
req.setForce(force);
try {
rpcProxy.deleteVolume(null, req.build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
@Override
public VolumeInfo infoVolume(String userName, String volumeName)
throws IOException {
CBlockServiceProtocolProtos.InfoVolumeRequestProto.Builder req =
CBlockServiceProtocolProtos.InfoVolumeRequestProto.newBuilder();
req.setUserName(userName);
req.setVolumeName(volumeName);
try {
CBlockServiceProtocolProtos.InfoVolumeResponseProto resp =
rpcProxy.infoVolume(null, req.build());
return new VolumeInfo(resp.getVolumeInfo().getUserName(),
resp.getVolumeInfo().getVolumeName(),
resp.getVolumeInfo().getVolumeSize(),
resp.getVolumeInfo().getBlockSize(),
resp.getVolumeInfo().getUsage());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public List<VolumeInfo> listVolume(String userName) throws IOException {
CBlockServiceProtocolProtos.ListVolumeRequestProto.Builder req =
CBlockServiceProtocolProtos.ListVolumeRequestProto.newBuilder();
if (userName != null) {
req.setUserName(userName);
}
try {
CBlockServiceProtocolProtos.ListVolumeResponseProto resp =
rpcProxy.listVolume(null, req.build());
List<VolumeInfo> respList = new ArrayList<>();
for (CBlockServiceProtocolProtos.VolumeInfoProto entry :
resp.getVolumeEntryList()) {
VolumeInfo volumeInfo = new VolumeInfo(
entry.getUserName(), entry.getVolumeName(), entry.getVolumeSize(),
entry.getBlockSize());
respList.add(volumeInfo);
}
return respList;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
} catch (Exception e) {
throw new IOException("got" + e.getCause() + " " + e.getMessage());
}
}
}

View File

@@ -1,83 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.client;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.cblock.CBlockConfigKeys;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr;
/**
* Implementation of the client used by the CBlock command-line tool.
*/
public class CBlockVolumeClient {
private final CBlockServiceProtocolClientSideTranslatorPB cblockClient;
public CBlockVolumeClient(OzoneConfiguration conf) throws IOException {
this(conf, null);
}
public CBlockVolumeClient(OzoneConfiguration conf,
InetSocketAddress serverAddress) throws IOException {
InetSocketAddress address = serverAddress != null ? serverAddress :
getCblockServiceRpcAddr(conf);
long version = RPC.getProtocolVersion(CBlockServiceProtocolPB.class);
int rpcTimeout = Math.toIntExact(
conf.getTimeDuration(CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT,
CBlockConfigKeys.DFS_CBLOCK_RPC_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS));
cblockClient = new CBlockServiceProtocolClientSideTranslatorPB(
RPC.getProtocolProxy(CBlockServiceProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf), rpcTimeout, RetryPolicies
.retryUpToMaximumCountWithFixedSleep(
300, 1, TimeUnit.SECONDS)).getProxy());
}
public void createVolume(String userName, String volumeName,
long volumeSize, int blockSize) throws IOException {
cblockClient.createVolume(userName, volumeName,
volumeSize, blockSize);
}
public void deleteVolume(String userName, String volumeName, boolean force)
throws IOException {
cblockClient.deleteVolume(userName, volumeName, force);
}
public VolumeInfo infoVolume(String userName, String volumeName)
throws IOException {
return cblockClient.infoVolume(userName, volumeName);
}
public List<VolumeInfo> listVolume(String userName)
throws IOException {
return cblockClient.listVolume(userName);
}
}
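
A hedged sketch of a volume lifecycle driven through this client; the user and volume names are made up, and CblockUtils.parseSize is the helper shown earlier:

import java.io.IOException;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

static void volumeLifecycleDemo() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
CBlockVolumeClient client = new CBlockVolumeClient(conf);
// Create a 5 GB volume with 4 KB blocks, inspect it, then force-delete it.
client.createVolume("user1", "volume1", CblockUtils.parseSize("5GB"), 4096);
VolumeInfo info = client.infoVolume("user1", "volume1");
client.deleteVolume("user1", "volume1", true);
}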

View File

@@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.client;

View File

@@ -1,29 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.exception;
import java.io.IOException;
/**
* The exception class used in CBlock.
*/
public class CBlockException extends IOException {
public CBlockException(String message) {
super(message);
}
}

View File

@@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.exception;

View File

@@ -1,175 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
/**
* The blockWriter task.
*/
public class BlockWriterTask implements Runnable {
private final LogicalBlock block;
private int tryCount;
private final ContainerCacheFlusher flusher;
private final String dbPath;
private final String fileName;
private final int maxRetryCount;
/**
* Constructs a BlockWriterTask.
*
* @param block - Block Information.
* @param flusher - ContainerCacheFlusher.
* @param dbPath - Path of the local levelDB cache for this volume.
* @param tryCount - Number of write attempts made so far for this block.
* @param fileName - Name of the dirty-block log file being drained.
* @param maxRetryCount - Maximum number of attempts before giving up.
*/
public BlockWriterTask(LogicalBlock block, ContainerCacheFlusher flusher,
String dbPath, int tryCount, String fileName, int maxRetryCount) {
this.block = block;
this.flusher = flusher;
this.dbPath = dbPath;
this.tryCount = tryCount;
this.fileName = fileName;
this.maxRetryCount = maxRetryCount;
}
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see Thread#run()
*/
@Override
public void run() {
String containerName = null;
XceiverClientSpi client = null;
LevelDBStore levelDBStore = null;
String traceID = flusher.getTraceID(new File(dbPath), block.getBlockID());
flusher.getLOG().debug(
"Writing block to remote. block ID: {}", block.getBlockID());
try {
incTryCount();
Pipeline pipeline = flusher.getPipeline(this.dbPath, block.getBlockID());
client = flusher.getXceiverClientManager().acquireClient(pipeline);
containerName = pipeline.getContainerName();
byte[] keybuf = Longs.toByteArray(block.getBlockID());
byte[] data;
long startTime = Time.monotonicNow();
levelDBStore = flusher.getCacheDB(this.dbPath);
data = levelDBStore.get(keybuf);
Preconditions.checkNotNull(data);
long endTime = Time.monotonicNow();
Preconditions.checkState(data.length > 0, "Block data is zero length");
startTime = Time.monotonicNow();
ContainerProtocolCalls.writeSmallFile(client, containerName,
Long.toString(block.getBlockID()), data, traceID);
endTime = Time.monotonicNow();
flusher.getTargetMetrics().updateContainerWriteLatency(
endTime - startTime);
flusher.getLOG().debug("Time taken for Write Small File : {} ms",
endTime - startTime);
flusher.incrementRemoteIO();
} catch (Exception ex) {
flusher.getLOG().error("Writing of block:{} failed, We have attempted " +
"to write this block {} times to the container {}.Trace ID:{}",
block.getBlockID(), this.getTryCount(), containerName, traceID, ex);
writeRetryBlock(block);
if (ex instanceof IOException) {
flusher.getTargetMetrics().incNumWriteIOExceptionRetryBlocks();
} else {
flusher.getTargetMetrics().incNumWriteGenericExceptionRetryBlocks();
}
if (this.getTryCount() >= maxRetryCount) {
flusher.getTargetMetrics().incNumWriteMaxRetryBlocks();
}
} finally {
flusher.incFinishCount(fileName);
if (levelDBStore != null) {
flusher.releaseCacheDB(dbPath);
}
if(client != null) {
flusher.getXceiverClientManager().releaseClient(client);
}
}
}
private void writeRetryBlock(LogicalBlock currentBlock) {
boolean append = false;
String retryFileName =
String.format("%s.%d.%s.%s", AsyncBlockWriter.RETRY_LOG_PREFIX,
currentBlock.getBlockID(), Time.monotonicNow(), tryCount);
File logDir = new File(this.dbPath);
if (!logDir.exists() && !logDir.mkdirs()) {
flusher.getLOG().error(
"Unable to create the log directory, Critical error cannot continue");
return;
}
String log = Paths.get(this.dbPath, retryFileName).toString();
ByteBuffer buffer = ByteBuffer.allocate(Long.SIZE / Byte.SIZE);
buffer.putLong(currentBlock.getBlockID());
buffer.flip();
try (FileChannel channel =
new FileOutputStream(log, append).getChannel()) {
// try-with-resources closes the channel even if the write throws
channel.write(buffer);
flusher.processDirtyBlocks(this.dbPath, retryFileName);
} catch (IOException e) {
flusher.getTargetMetrics().incNumFailedRetryLogFileWrites();
flusher.getLOG().error("Unable to write the retry block. Block ID: {}",
currentBlock.getBlockID(), e);
}
}
/**
* Increments the try count. This is done each time we try this block
* write to the container.
*/
private void incTryCount() {
tryCount++;
}
/**
* Get the retry count.
*
* @return int
*/
public int getTryCount() {
return tryCount;
}
}
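
A minimal sketch of how such a task is dispatched; the helper, the executor, and the initial tryCount of 0 are assumptions, since the removed ContainerCacheFlusher wiring is not shown in this hunk:

import java.util.concurrent.ExecutorService;

// Hypothetical helper: submits the first write attempt for a dirty block.
static void submitWrite(ExecutorService pool, LogicalBlock block,
ContainerCacheFlusher flusher, String dbPath, String fileName,
int maxRetryCount) {
// tryCount starts at 0; the task increments it on each attempt, and failed
// blocks are re-queued via the retry log until maxRetryCount is reached.
pool.submit(new BlockWriterTask(block, flusher, dbPath, 0, fileName,
maxRetryCount));
}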

View File

@@ -1,147 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import com.google.common.primitives.Longs;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.cblock.exception.CBlockException;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.CBlockClientProtocol;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.protocol.proto
.CBlockClientServerProtocolProtos.ContainerIDProto;
import org.apache.hadoop.cblock.protocol.proto
.CBlockClientServerProtocolProtos.ListVolumesRequestProto;
import org.apache.hadoop.cblock.protocol.proto
.CBlockClientServerProtocolProtos.ListVolumesResponseProto;
import org.apache.hadoop.cblock.protocol.proto
.CBlockClientServerProtocolProtos.MountVolumeRequestProto;
import org.apache.hadoop.cblock.protocol.proto
.CBlockClientServerProtocolProtos.MountVolumeResponseProto;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos
.VolumeInfoProto;
import org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
/**
* The client side of CBlockClientProtocol.
*
* CBlockClientProtocol is the protocol used between the cblock client side
* and the cblock manager (the cblock client side is just the node where the
* jscsi daemon process runs; a machine talks to the jscsi daemon to mount a
* volume).
*
* Right now, the only communication carried by this protocol is the client
* side requesting to mount a volume.
*/
public class CBlockClientProtocolClientSideTranslatorPB
implements CBlockClientProtocol, ProtocolTranslator, Closeable {
private final CBlockClientServerProtocolPB rpcProxy;
public CBlockClientProtocolClientSideTranslatorPB(
CBlockClientServerProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
@Override
public MountVolumeResponse mountVolume(
String userName, String volumeName) throws IOException {
MountVolumeRequestProto.Builder request =
MountVolumeRequestProto.newBuilder();
request.setUserName(userName);
request.setVolumeName(volumeName);
try {
MountVolumeResponseProto resp
= rpcProxy.mountVolume(null, request.build());
if (!resp.getIsValid()) {
throw new CBlockException(
"Not a valid volume:" + userName + ":" + volumeName);
}
List<Pipeline> containerIDs = new ArrayList<>();
HashMap<String, Pipeline> containerPipelines = new HashMap<>();
if (resp.getAllContainerIDsList().size() == 0) {
throw new CBlockException("Mount volume request returned no container");
}
for (ContainerIDProto containerID :
resp.getAllContainerIDsList()) {
if (containerID.hasPipeline()) {
// it should always have a pipeline only except for tests.
Pipeline p = Pipeline.getFromProtoBuf(containerID.getPipeline());
p.setData(Longs.toByteArray(containerID.getIndex()));
containerIDs.add(p);
containerPipelines.put(containerID.getContainerID(), p);
} else {
throw new CBlockException("ContainerID does not have pipeline!");
}
}
return new MountVolumeResponse(
resp.getIsValid(),
resp.getUserName(),
resp.getVolumeName(),
resp.getVolumeSize(),
resp.getBlockSize(),
containerIDs,
containerPipelines);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public List<VolumeInfo> listVolumes() throws IOException {
try {
List<VolumeInfo> result = new ArrayList<>();
ListVolumesResponseProto listVolumesResponseProto =
this.rpcProxy.listVolumes(null,
ListVolumesRequestProto.newBuilder().build());
for (VolumeInfoProto volumeInfoProto :
listVolumesResponseProto
.getVolumeEntryList()) {
result.add(new VolumeInfo(volumeInfoProto.getUserName(),
volumeInfoProto.getVolumeName(), volumeInfoProto.getVolumeSize(),
volumeInfoProto.getBlockSize()));
}
return result;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}
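
A hedged sketch of the mount call from the jSCSI side; proxy construction (as in CBlockVolumeClient earlier) is elided, and the user and volume names are illustrative:

import java.io.IOException;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;

static void mountDemo(CBlockClientServerProtocolPB rpcProxy) throws IOException {
CBlockClientProtocolClientSideTranslatorPB client =
new CBlockClientProtocolClientSideTranslatorPB(rpcProxy);
// On success the response carries the volume/block sizes plus one Pipeline
// per container, which the jSCSI target uses to route reads and writes.
MountVolumeResponse resp = client.mountVolume("user1", "volume1");
}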

View File

@@ -1,440 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.jscsi.target.storage.IStorageModule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_TRACE_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_TRACE_IO_DEFAULT;
/**
* The SCSI Target class for CBlockSCSIServer.
*/
public final class CBlockIStorageImpl implements IStorageModule {
private static final Logger LOGGER =
LoggerFactory.getLogger(CBlockIStorageImpl.class);
private static final Logger TRACER =
LoggerFactory.getLogger("TraceIO");
private CacheModule cache;
private final long volumeSize;
private final int blockSize;
private final String userName;
private final String volumeName;
private final boolean traceEnabled;
private final Configuration conf;
private final ContainerCacheFlusher flusher;
private List<Pipeline> fullContainerList;
/**
* private: constructs a SCSI Target.
*
* @param config - config
* @param userName - Username
* @param volumeName - Name of the volume
* @param volumeSize - Size of the volume
* @param blockSize - Size of the block
* @param fullContainerList - Ordered list of containers that make up this
* volume.
* @param flusher - flusher which is used to flush data from the
* LevelDB cache to containers
*/
private CBlockIStorageImpl(Configuration config, String userName,
String volumeName, long volumeSize, int blockSize,
List<Pipeline> fullContainerList, ContainerCacheFlusher flusher) {
this.conf = config;
this.userName = userName;
this.volumeName = volumeName;
this.volumeSize = volumeSize;
this.blockSize = blockSize;
this.fullContainerList = new ArrayList<>(fullContainerList);
this.flusher = flusher;
this.traceEnabled = conf.getBoolean(DFS_CBLOCK_TRACE_IO,
DFS_CBLOCK_TRACE_IO_DEFAULT);
}
/**
* private: initialize the cache.
*
* @param xceiverClientManager - client manager that is used for creating new
* connections to containers.
* @param metrics - target metrics to maintain metrics for target server
* @throws IOException - Throws IOException.
*/
private void initCache(XceiverClientManager xceiverClientManager,
CBlockTargetMetrics metrics) throws IOException {
this.cache = CBlockLocalCache.newBuilder()
.setConfiguration(conf)
.setVolumeName(this.volumeName)
.setUserName(this.userName)
.setPipelines(this.fullContainerList)
.setClientManager(xceiverClientManager)
.setBlockSize(blockSize)
.setVolumeSize(volumeSize)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
this.cache.start();
}
/**
* Gets a new builder for CBlockIStorageImpl.
*
* @return builder
*/
public static Builder newBuilder() {
return new Builder();
}
/**
* Get Cache.
*
* @return - Cache
*/
public CacheModule getCache() {
return cache;
}
/**
* Returns block size of this volume.
*
* @return int size of block for this volume.
*/
@Override
public int getBlockSize() {
return blockSize;
}
/**
* Checks the index boundary of a block address.
*
* @param logicalBlockAddress the index of the first block of data to be read
* or written
* @param transferLengthInBlocks the total number of consecutive blocks about
* to be read or written
* @return 0 == Success, 1 indicates the LBA address is out of bounds and 2
* indicates that LBA + transfer size is out of bounds.
*/
@Override
public int checkBounds(long logicalBlockAddress, int transferLengthInBlocks) {
long sizeInBlocks = volumeSize / blockSize;
int res = 0;
if (logicalBlockAddress < 0 || logicalBlockAddress >= sizeInBlocks) {
res = 1;
}
if (transferLengthInBlocks < 0 ||
logicalBlockAddress + transferLengthInBlocks > sizeInBlocks) {
if (res == 0) {
res = 2;
}
}
return res;
}
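// Worked example (illustrative, not part of the original source):
// with volumeSize = 1 GiB and blockSize = 4 KiB, sizeInBlocks = 262144.
//   checkBounds(0, 1)      -> 0  (fully in range)
//   checkBounds(262144, 1) -> 1  (starting LBA is out of bounds)
//   checkBounds(262143, 2) -> 2  (LBA valid, but LBA + transfer length
//                                 runs past the end of the volume)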
/**
* Number of blocks that make up this volume.
*
* @return long - count of blocks.
*/
@Override
public long getSizeInBlocks() {
return volumeSize / blockSize;
}
/**
* Reads data from the indicated location into the given byte buffer.
*
* @param bytes the array into which the data will be copied; it is
* filled with data from storage
* @param storageIndex the position of the first byte to be copied
* @throws IOException
*/
@Override
public void read(byte[] bytes, long storageIndex) throws IOException {
int startingIdxInBlock = (int) (storageIndex % blockSize);
int idxInBytes = 0;
if (this.traceEnabled) {
TRACER.info("Task=ReadStart,length={},location={}",
bytes.length, storageIndex);
}
while (idxInBytes < bytes.length) {
long blockId = (storageIndex + idxInBytes) / blockSize;
byte[] dataBytes;
try {
LogicalBlock block = this.cache.get(blockId);
dataBytes = block.getData().array();
if (this.traceEnabled) {
TRACER.info("Task=ReadBlock,BlockID={},length={},SHA={}",
blockId,
dataBytes.length,
dataBytes.length > 0 ? DigestUtils.sha256Hex(dataBytes) : null);
}
} catch (IOException e) {
// For a non-existent block, cache.get returns a zero-filled block,
// so any error here is a real error.
LOGGER.error("Error reading data from cache", e);
throw e;
}
int length = blockSize - startingIdxInBlock;
if (length > bytes.length - idxInBytes) {
length = bytes.length - idxInBytes;
}
if (dataBytes.length >= length) {
System.arraycopy(dataBytes, startingIdxInBlock, bytes, idxInBytes,
length);
}
startingIdxInBlock = 0;
idxInBytes += length;
}
if (this.traceEnabled) {
TRACER.info("Task=ReadEnd,length={},location={},SHA={}",
bytes.length, storageIndex, DigestUtils.sha256Hex(bytes));
}
}
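// Worked example of the read loop above (illustrative values only):
// with blockSize = 4096, storageIndex = 6000 and bytes.length = 5000,
// the initial in-block offset is 6000 % 4096 = 1904, and the loop issues:
//   iteration 1: blockId = 1, copy 2192 bytes from block offset 1904
//   iteration 2: blockId = 2, copy 2808 bytes from block offset 0
// i.e. a request that is not block-aligned is split across two cached
// blocks, and only the first copy starts at a non-zero offset.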
@Override
public void write(byte[] bytes, long storageIndex) throws IOException {
int startingIdxInBlock = (int) (storageIndex % blockSize);
int idxInBytes = 0;
if (this.traceEnabled) {
TRACER.info("Task=WriteStart,length={},location={},SHA={}",
bytes.length, storageIndex,
bytes.length > 0 ? DigestUtils.sha256Hex(bytes) : null);
}
ByteBuffer dataByte = ByteBuffer.allocate(blockSize);
while (idxInBytes < bytes.length) {
long blockId = (storageIndex + idxInBytes) / blockSize;
int length = blockSize - startingIdxInBlock;
if (length > bytes.length - idxInBytes) {
length = bytes.length - idxInBytes;
}
System.arraycopy(bytes, idxInBytes, dataByte.array(), startingIdxInBlock,
length);
this.cache.put(blockId, dataByte.array());
if (this.traceEnabled) {
TRACER.info("Task=WriteBlock,BlockID={},length={},SHA={}",
blockId, dataByte.array().length,
dataByte.array().length > 0 ?
DigestUtils.sha256Hex(dataByte.array()) : null);
}
dataByte.clear();
startingIdxInBlock = 0;
idxInBytes += length;
}
if (this.traceEnabled) {
TRACER.info("Task=WriteEnd,length={},location={} ",
bytes.length, storageIndex);
}
}
@Override
public void close() throws IOException {
try {
cache.close();
} catch (IllegalStateException ise) {
LOGGER.error("Can not close the storage {}", ise);
throw ise;
}
}
/**
* Builder class for CBlockIStorageImpl.
*/
public static class Builder {
private String userName;
private String volumeName;
private long volumeSize;
private int blockSize;
private List<Pipeline> containerList;
private Configuration conf;
private XceiverClientManager clientManager;
private ContainerCacheFlusher flusher;
private CBlockTargetMetrics metrics;
/**
* Constructs a builder.
*/
Builder() {
}
public Builder setFlusher(ContainerCacheFlusher cacheFlusher) {
this.flusher = cacheFlusher;
return this;
}
/**
* set config.
*
* @param config - config
* @return Builder
*/
public Builder setConf(Configuration config) {
this.conf = config;
return this;
}
/**
* set user name.
*
* @param cblockUserName - user name
* @return Builder
*/
public Builder setUserName(String cblockUserName) {
this.userName = cblockUserName;
return this;
}
/**
* set volume name.
*
* @param cblockVolumeName -- volume name
* @return Builder
*/
public Builder setVolumeName(String cblockVolumeName) {
this.volumeName = cblockVolumeName;
return this;
}
/**
* set volume size.
*
* @param cblockVolumeSize -- set volume size.
* @return Builder
*/
public Builder setVolumeSize(long cblockVolumeSize) {
this.volumeSize = cblockVolumeSize;
return this;
}
/**
* set block size.
*
* @param cblockBlockSize -- block size
* @return Builder
*/
public Builder setBlockSize(int cblockBlockSize) {
this.blockSize = cblockBlockSize;
return this;
}
/**
* Set container list.
*
* @param cblockContainerList - set the pipeline list
* @return Builder
*/
public Builder setContainerList(List<Pipeline> cblockContainerList) {
this.containerList = cblockContainerList;
return this;
}
/**
* Set client manager.
*
* @param xceiverClientManager -- sets the client manager.
* @return Builder
*/
public Builder setClientManager(XceiverClientManager xceiverClientManager) {
this.clientManager = xceiverClientManager;
return this;
}
/**
* Set Cblock Target Metrics.
*
* @param targetMetrics -- sets the cblock target metrics
* @return Builder
*/
public Builder setCBlockTargetMetrics(CBlockTargetMetrics targetMetrics) {
this.metrics = targetMetrics;
return this;
}
/**
* Builds the CBlockIStorageImpl.
*
* @return the CBlock SCSI target.
*/
public CBlockIStorageImpl build() throws IOException {
if (StringUtils.isBlank(userName)) {
throw new IllegalArgumentException("User name cannot be null or empty" +
".");
}
if (StringUtils.isBlank(volumeName)) {
throw new IllegalArgumentException("Volume name cannot be null or " +
"empty");
}
if (volumeSize < 1) {
throw new IllegalArgumentException("Volume size cannot be negative or" +
" zero.");
}
if (blockSize < 1) {
throw new IllegalArgumentException("Block size cannot be negative or " +
"zero.");
}
if (containerList == null || containerList.size() == 0) {
throw new IllegalArgumentException("Container list cannot be null or " +
"empty");
}
if (clientManager == null) {
throw new IllegalArgumentException("Client manager cannot be null");
}
if (conf == null) {
throw new IllegalArgumentException("Configuration cannot be null");
}
if (flusher == null) {
throw new IllegalArgumentException("Flusher Cannot be null.");
}
CBlockIStorageImpl impl = new CBlockIStorageImpl(this.conf, this.userName,
this.volumeName, this.volumeSize, this.blockSize, this.containerList,
this.flusher);
impl.initCache(this.clientManager, this.metrics);
return impl;
}
}
}
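// A hedged sketch of how the builder is typically wired up (it mirrors
// the call in CBlockTargetServer.isValidTargetName(); the local
// variables here are assumptions, not part of the original source):
//
//   CBlockIStorageImpl store = CBlockIStorageImpl.newBuilder()
//       .setUserName("user1")
//       .setVolumeName("volume1")
//       .setVolumeSize(1L << 30)          // 1 GiB
//       .setBlockSize(4096)
//       .setContainerList(containers)
//       .setClientManager(clientManager)
//       .setConf(conf)
//       .setFlusher(flusher)
//       .setCBlockTargetMetrics(metrics)
//       .build();                         // validates, then initCache()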

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import java.io.IOException;
import java.util.List;
/**
* This class is the handler used by the target server to communicate
* with CBlockManager.
*
* More specifically, this class exposes local methods to the target
* server and makes RPC calls to CBlockManager accordingly.
*/
public class CBlockManagerHandler {
private final CBlockClientProtocolClientSideTranslatorPB handler;
public CBlockManagerHandler(
CBlockClientProtocolClientSideTranslatorPB handler) {
this.handler = handler;
}
public MountVolumeResponse mountVolume(
String userName, String volumeName) throws IOException {
return handler.mountVolume(userName, volumeName);
}
public List<VolumeInfo> listVolumes() throws IOException {
return handler.listVolumes();
}
}

View File

@ -1,334 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
/**
* This class is for maintaining the various Cblock Target statistics
* and publishing them through the metrics interfaces.
* This also registers the JMX MBean for RPC.
*
* This class maintains stats like cache hit and miss ratio
* as well as the latency time of read and write ops.
*/
public class CBlockTargetMetrics {
// IOPS based Metrics
@Metric private MutableCounterLong numReadOps;
@Metric private MutableCounterLong numWriteOps;
@Metric private MutableCounterLong numReadCacheHits;
@Metric private MutableCounterLong numReadCacheMiss;
@Metric private MutableCounterLong numDirectBlockWrites;
// Cblock internal Metrics
@Metric private MutableCounterLong numDirtyLogBlockRead;
@Metric private MutableCounterLong numBytesDirtyLogRead;
@Metric private MutableCounterLong numBytesDirtyLogWritten;
@Metric private MutableCounterLong numBlockBufferFlushCompleted;
@Metric private MutableCounterLong numBlockBufferFlushTriggered;
@Metric private MutableCounterLong numBlockBufferUpdates;
@Metric private MutableCounterLong numRetryLogBlockRead;
@Metric private MutableCounterLong numBytesRetryLogRead;
// Failure Metrics
@Metric private MutableCounterLong numReadLostBlocks;
@Metric private MutableCounterLong numFailedReadBlocks;
@Metric private MutableCounterLong numWriteIOExceptionRetryBlocks;
@Metric private MutableCounterLong numWriteGenericExceptionRetryBlocks;
@Metric private MutableCounterLong numFailedDirectBlockWrites;
@Metric private MutableCounterLong numIllegalDirtyLogFiles;
@Metric private MutableCounterLong numFailedDirtyLogFileDeletes;
@Metric private MutableCounterLong numFailedBlockBufferFlushes;
@Metric private MutableCounterLong numInterruptedBufferWaits;
@Metric private MutableCounterLong numFailedRetryLogFileWrites;
@Metric private MutableCounterLong numWriteMaxRetryBlocks;
@Metric private MutableCounterLong numFailedReleaseLevelDB;
// Latency based Metrics
@Metric private MutableRate dbReadLatency;
@Metric private MutableRate containerReadLatency;
@Metric private MutableRate dbWriteLatency;
@Metric private MutableRate containerWriteLatency;
@Metric private MutableRate blockBufferFlushLatency;
@Metric private MutableRate directBlockWriteLatency;
public CBlockTargetMetrics() {
}
public static CBlockTargetMetrics create() {
MetricsSystem ms = DefaultMetricsSystem.instance();
return ms.register("CBlockTargetMetrics",
"CBlock Target Metrics",
new CBlockTargetMetrics());
}
public void incNumReadOps() {
numReadOps.incr();
}
public void incNumWriteOps() {
numWriteOps.incr();
}
public void incNumReadCacheHits() {
numReadCacheHits.incr();
}
public void incNumReadCacheMiss() {
numReadCacheMiss.incr();
}
public void incNumReadLostBlocks() {
numReadLostBlocks.incr();
}
public void incNumDirectBlockWrites() {
numDirectBlockWrites.incr();
}
public void incNumWriteIOExceptionRetryBlocks() {
numWriteIOExceptionRetryBlocks.incr();
}
public void incNumWriteGenericExceptionRetryBlocks() {
numWriteGenericExceptionRetryBlocks.incr();
}
public void incNumFailedDirectBlockWrites() {
numFailedDirectBlockWrites.incr();
}
public void incNumFailedReadBlocks() {
numFailedReadBlocks.incr();
}
public void incNumBlockBufferFlushCompleted() {
numBlockBufferFlushCompleted.incr();
}
public void incNumBlockBufferFlushTriggered() {
numBlockBufferFlushTriggered.incr();
}
public void incNumDirtyLogBlockRead() {
numDirtyLogBlockRead.incr();
}
public void incNumBytesDirtyLogRead(int bytes) {
numBytesDirtyLogRead.incr(bytes);
}
public void incNumBlockBufferUpdates() {
numBlockBufferUpdates.incr();
}
public void incNumRetryLogBlockRead() {
numRetryLogBlockRead.incr();
}
public void incNumBytesRetryLogRead(int bytes) {
numBytesRetryLogRead.incr(bytes);
}
public void incNumBytesDirtyLogWritten(int bytes) {
numBytesDirtyLogWritten.incr(bytes);
}
public void incNumFailedBlockBufferFlushes() {
numFailedBlockBufferFlushes.incr();
}
public void incNumInterruptedBufferWaits() {
numInterruptedBufferWaits.incr();
}
public void incNumIllegalDirtyLogFiles() {
numIllegalDirtyLogFiles.incr();
}
public void incNumFailedDirtyLogFileDeletes() {
numFailedDirtyLogFileDeletes.incr();
}
public void incNumFailedRetryLogFileWrites() {
numFailedRetryLogFileWrites.incr();
}
public void incNumWriteMaxRetryBlocks() {
numWriteMaxRetryBlocks.incr();
}
public void incNumFailedReleaseLevelDB() {
numFailedReleaseLevelDB.incr();
}
public void updateDBReadLatency(long latency) {
dbReadLatency.add(latency);
}
public void updateContainerReadLatency(long latency) {
containerReadLatency.add(latency);
}
public void updateDBWriteLatency(long latency) {
dbWriteLatency.add(latency);
}
public void updateContainerWriteLatency(long latency) {
containerWriteLatency.add(latency);
}
public void updateDirectBlockWriteLatency(long latency) {
directBlockWriteLatency.add(latency);
}
public void updateBlockBufferFlushLatency(long latency) {
blockBufferFlushLatency.add(latency);
}
@VisibleForTesting
public long getNumReadOps() {
return numReadOps.value();
}
@VisibleForTesting
public long getNumWriteOps() {
return numWriteOps.value();
}
@VisibleForTesting
public long getNumReadCacheHits() {
return numReadCacheHits.value();
}
@VisibleForTesting
public long getNumReadCacheMiss() {
return numReadCacheMiss.value();
}
@VisibleForTesting
public long getNumReadLostBlocks() {
return numReadLostBlocks.value();
}
@VisibleForTesting
public long getNumDirectBlockWrites() {
return numDirectBlockWrites.value();
}
@VisibleForTesting
public long getNumFailedDirectBlockWrites() {
return numFailedDirectBlockWrites.value();
}
@VisibleForTesting
public long getNumFailedReadBlocks() {
return numFailedReadBlocks.value();
}
@VisibleForTesting
public long getNumWriteIOExceptionRetryBlocks() {
return numWriteIOExceptionRetryBlocks.value();
}
@VisibleForTesting
public long getNumWriteGenericExceptionRetryBlocks() {
return numWriteGenericExceptionRetryBlocks.value();
}
@VisibleForTesting
public long getNumBlockBufferFlushCompleted() {
return numBlockBufferFlushCompleted.value();
}
@VisibleForTesting
public long getNumBlockBufferFlushTriggered() {
return numBlockBufferFlushTriggered.value();
}
@VisibleForTesting
public long getNumDirtyLogBlockRead() {
return numDirtyLogBlockRead.value();
}
@VisibleForTesting
public long getNumBytesDirtyLogReads() {
return numBytesDirtyLogRead.value();
}
@VisibleForTesting
public long getNumBlockBufferUpdates() {
return numBlockBufferUpdates.value();
}
@VisibleForTesting
public long getNumRetryLogBlockRead() {
return numRetryLogBlockRead.value();
}
@VisibleForTesting
public long getNumBytesRetryLogReads() {
return numBytesRetryLogRead.value();
}
@VisibleForTesting
public long getNumBytesDirtyLogWritten() {
return numBytesDirtyLogWritten.value();
}
@VisibleForTesting
public long getNumFailedBlockBufferFlushes() {
return numFailedBlockBufferFlushes.value();
}
@VisibleForTesting
public long getNumInterruptedBufferWaits() {
return numInterruptedBufferWaits.value();
}
@VisibleForTesting
public long getNumIllegalDirtyLogFiles() {
return numIllegalDirtyLogFiles.value();
}
@VisibleForTesting
public long getNumFailedDirtyLogFileDeletes() {
return numFailedDirtyLogFileDeletes.value();
}
@VisibleForTesting
public long getNumFailedRetryLogFileWrites() {
return numFailedRetryLogFileWrites.value();
}
@VisibleForTesting
public long getNumWriteMaxRetryBlocks() {
return numWriteMaxRetryBlocks.value();
}
@VisibleForTesting
public long getNumFailedReleaseLevelDB() {
return numFailedReleaseLevelDB.value();
}
}
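// A minimal usage sketch (not in the original source; it mirrors the
// setup in SCSITargetDaemon.main()): register the metrics system once,
// create the metrics object, then update it on each operation.
//
//   DefaultMetricsSystem.initialize("CBlockMetrics");
//   CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
//   metrics.incNumReadOps();
//   metrics.updateDBReadLatency(elapsedMillis);
//   // in tests: assert metrics.getNumReadOps() == 1;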

View File

@ -1,128 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.jscsi.target.Configuration;
import org.jscsi.target.Target;
import org.jscsi.target.TargetServer;
import java.io.IOException;
import java.util.HashMap;
/**
* This class extends the jSCSI target server, an iSCSI target that can be
* recognized by a remote machine with an iSCSI initiator installed.
*/
public final class CBlockTargetServer extends TargetServer {
private final OzoneConfiguration conf;
private final CBlockManagerHandler cBlockManagerHandler;
private final XceiverClientManager xceiverClientManager;
private final ContainerCacheFlusher containerCacheFlusher;
private final CBlockTargetMetrics metrics;
public CBlockTargetServer(OzoneConfiguration ozoneConfig,
Configuration jscsiConf,
CBlockManagerHandler cBlockManagerHandler,
CBlockTargetMetrics metrics)
throws IOException {
super(jscsiConf);
this.cBlockManagerHandler = cBlockManagerHandler;
this.xceiverClientManager = new XceiverClientManager(ozoneConfig);
this.conf = ozoneConfig;
this.containerCacheFlusher = new ContainerCacheFlusher(this.conf,
xceiverClientManager, metrics);
this.metrics = metrics;
LOGGER.info("Starting flusher thread.");
Thread flushListenerThread = new Thread(containerCacheFlusher);
flushListenerThread.setDaemon(true);
flushListenerThread.start();
}
public static void main(String[] args) throws Exception {
}
@Override
public boolean isValidTargetName(String checkTargetName) {
if (!KeyUtil.isValidVolumeKey(checkTargetName)) {
return false;
}
String userName = KeyUtil.getUserNameFromVolumeKey(checkTargetName);
String volumeName = KeyUtil.getVolumeFromVolumeKey(checkTargetName);
if (userName == null || volumeName == null) {
return false;
}
try {
MountVolumeResponse result =
cBlockManagerHandler.mountVolume(userName, volumeName);
if (!result.getIsValid()) {
LOGGER.error("Not a valid volume:" + checkTargetName);
return false;
}
String volumeKey = KeyUtil.getVolumeKey(result.getUserName(),
result.getVolumeName());
if (!targets.containsKey(volumeKey)) {
LOGGER.info("Mounting Volume. username: {} volume:{}",
userName, volumeName);
CBlockIStorageImpl ozoneStore = CBlockIStorageImpl.newBuilder()
.setUserName(userName)
.setVolumeName(volumeName)
.setVolumeSize(result.getVolumeSize())
.setBlockSize(result.getBlockSize())
.setContainerList(result.getContainerList())
.setClientManager(xceiverClientManager)
.setConf(this.conf)
.setFlusher(containerCacheFlusher)
.setCBlockTargetMetrics(metrics)
.build();
Target target = new Target(volumeKey, volumeKey, ozoneStore);
targets.put(volumeKey, target);
}
} catch (IOException e) {
LOGGER.error("Can not connect to server when validating target!"
+ e.getMessage());
}
return targets.containsKey(checkTargetName);
}
@Override
public String[] getTargetNames() {
try {
if (cBlockManagerHandler != null) {
return cBlockManagerHandler.listVolumes().stream()
.map(volumeInfo -> volumeInfo.getUserName() + ":"
+ volumeInfo.getVolumeName())
.toArray(String[]::new);
} else {
return new String[0];
}
} catch (IOException e) {
LOGGER.error("Can't list existing volumes", e);
return new String[0];
}
}
@VisibleForTesting
public HashMap<String, Target> getTargets() {
return targets;
}
}

View File

@ -1,599 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.cblock.CBlockConfigKeys;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.DiskBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore;
import org.iq80.leveldb.Options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_KEEP_ALIVE;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_MAX_POOL_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_QUEUE_SIZE_KB;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_QUEUE_SIZE_KB_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_THREAD_PRIORITY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_DEFAULT;
/**
* Class that writes to remote containers.
*/
public class ContainerCacheFlusher implements Runnable {
private static final Logger LOG =
LoggerFactory.getLogger(ContainerCacheFlusher.class);
private final LinkedBlockingQueue<Message> messageQueue;
private final ThreadPoolExecutor threadPoolExecutor;
private final ArrayBlockingQueue<Runnable> workQueue;
private final ConcurrentMap<String, RefCountedDB> dbMap;
private final ByteBuffer blockIDBuffer;
private final ConcurrentMap<String, Pipeline[]> pipelineMap;
private final AtomicLong remoteIO;
private final XceiverClientManager xceiverClientManager;
private final CBlockTargetMetrics metrics;
private AtomicBoolean shutdown;
private final long levelDBCacheSize;
private final int maxRetryCount;
private final String tracePrefix;
private final ConcurrentMap<String, FinishCounter> finishCountMap;
/**
* Constructs the writers to remote queue.
*/
public ContainerCacheFlusher(Configuration config,
XceiverClientManager xceiverClientManager,
CBlockTargetMetrics metrics) {
int queueSize = config.getInt(DFS_CBLOCK_CACHE_QUEUE_SIZE_KB,
DFS_CBLOCK_CACHE_QUEUE_SIZE_KB_DEFAULT) * 1024;
int corePoolSize = config.getInt(DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE,
DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT);
int maxPoolSize = config.getInt(DFS_CBLOCK_CACHE_MAX_POOL_SIZE,
DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT);
long keepAlive = config.getTimeDuration(DFS_CBLOCK_CACHE_KEEP_ALIVE,
DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT, TimeUnit.SECONDS);
int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY,
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT);
int blockBufferSize = config.getInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE,
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT) * (Long.SIZE / Byte.SIZE);
levelDBCacheSize = config.getInt(DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_KEY,
DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_DEFAULT) * OzoneConsts.MB;
LOG.info("Cache: Core Pool Size: {}", corePoolSize);
LOG.info("Cache: Keep Alive: {}", keepAlive);
LOG.info("Cache: Max Pool Size: {}", maxPoolSize);
LOG.info("Cache: Thread Pri: {}", threadPri);
LOG.info("Cache: BlockBuffer Size: {}", blockBufferSize);
shutdown = new AtomicBoolean(false);
messageQueue = new LinkedBlockingQueue<>();
workQueue = new ArrayBlockingQueue<>(queueSize, true);
ThreadFactory workerThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("Cache Block Writer Thread #%d")
.setDaemon(true)
.setPriority(threadPri)
.build();
threadPoolExecutor = new ThreadPoolExecutor(corePoolSize, maxPoolSize,
keepAlive, TimeUnit.SECONDS, workQueue, workerThreadFactory,
new ThreadPoolExecutor.AbortPolicy());
threadPoolExecutor.prestartAllCoreThreads();
dbMap = new ConcurrentHashMap<>();
pipelineMap = new ConcurrentHashMap<>();
blockIDBuffer = ByteBuffer.allocateDirect(blockBufferSize);
this.xceiverClientManager = xceiverClientManager;
this.metrics = metrics;
this.remoteIO = new AtomicLong();
this.finishCountMap = new ConcurrentHashMap<>();
this.maxRetryCount =
config.getInt(CBlockConfigKeys.DFS_CBLOCK_CACHE_MAX_RETRY_KEY,
CBlockConfigKeys.DFS_CBLOCK_CACHE_MAX_RETRY_DEFAULT);
this.tracePrefix = getTracePrefix();
}
private void checkExistingLog(String prefixFileName, File dbPath) {
if (!dbPath.exists()) {
LOG.debug("No existing dirty log found at {}", dbPath);
return;
}
LOG.debug("Need to check and requeue existing dirty log {}", dbPath);
HashMap<String, ArrayList<String>> allFiles = new HashMap<>();
traverse(prefixFileName, dbPath, allFiles);
for (Map.Entry<String, ArrayList<String>> entry : allFiles.entrySet()) {
String parentPath = entry.getKey();
for (String fileName : entry.getValue()) {
LOG.info("found {} {} with prefix {}",
parentPath, fileName, prefixFileName);
processDirtyBlocks(parentPath, fileName);
}
}
}
private void traverse(String prefixFileName, File path,
HashMap<String, ArrayList<String>> files) {
if (path.isFile()) {
if (path.getName().startsWith(prefixFileName)) {
LOG.debug("found this {} with {}", path.getParent(), path.getName());
if (!files.containsKey(path.getParent())) {
files.put(path.getParent(), new ArrayList<>());
}
files.get(path.getParent()).add(path.getName());
}
} else {
File[] listFiles = path.listFiles();
if (listFiles != null) {
for (File subPath : listFiles) {
traverse(prefixFileName, subPath, files);
}
}
}
}
/**
* Gets the CBlockTargetMetrics.
*
* @return CBlockTargetMetrics
*/
public CBlockTargetMetrics getTargetMetrics() {
return metrics;
}
/**
* Gets the XceiverClientManager.
*
* @return XceiverClientManager
*/
public XceiverClientManager getXceiverClientManager() {
return xceiverClientManager;
}
/**
* Shutdown this instance.
*/
public void shutdown() {
this.shutdown.set(true);
threadPoolExecutor.shutdown();
}
public long incrementRemoteIO() {
return remoteIO.incrementAndGet();
}
/**
* Processes a block cache file and queues those blocks for the remote I/O.
*
* @param dbPath - Location where the DB can be found.
* @param fileName - Block Cache File Name
*/
public void processDirtyBlocks(String dbPath, String fileName) {
LOG.info("Adding {}/{} to queue. Queue Length: {}", dbPath, fileName,
messageQueue.size());
this.messageQueue.add(new Message(dbPath, fileName));
}
public Logger getLOG() {
return LOG;
}
/**
* Opens a DB if needed or returns a handle to an already open DB.
*
* @param dbPath -- dbPath
* @return the levelDB on the given path.
* @throws IOException
*/
public synchronized LevelDBStore openDB(String dbPath)
throws IOException {
if (dbMap.containsKey(dbPath)) {
RefCountedDB refDB = dbMap.get(dbPath);
refDB.open();
return refDB.db;
} else {
Options options = new Options();
options.cacheSize(levelDBCacheSize);
options.createIfMissing(true);
LevelDBStore cacheDB = new LevelDBStore(
new File(getDBFileName(dbPath)), options);
RefCountedDB refDB = new RefCountedDB(dbPath, cacheDB);
dbMap.put(dbPath, refDB);
return cacheDB;
}
}
/**
* Updates the container map. This data never changes so we will update this
* during restarts and it should not hurt us.
*
* Once a CBlockLocalCache cache is registered, requeue dirty/retry log files
* for the volume
*
* @param dbPath - DbPath
* @param containerList - Container List.
*/
public void register(String dbPath, Pipeline[] containerList) {
File dbFile = Paths.get(dbPath).toFile();
pipelineMap.put(dbPath, containerList);
checkExistingLog(AsyncBlockWriter.DIRTY_LOG_PREFIX, dbFile);
checkExistingLog(AsyncBlockWriter.RETRY_LOG_PREFIX, dbFile);
}
private String getDBFileName(String dbPath) {
return dbPath + ".db";
}
public LevelDBStore getCacheDB(String dbPath) throws IOException {
return openDB(dbPath);
}
public void releaseCacheDB(String dbPath) {
try {
closeDB(dbPath);
} catch (Exception e) {
metrics.incNumFailedReleaseLevelDB();
LOG.error("LevelDB close failed, dbPath:" + dbPath, e);
}
}
/**
* Close the DB if we don't have any outstanding references.
*
* @param dbPath - dbPath
* @throws IOException
*/
public synchronized void closeDB(String dbPath) throws IOException {
if (dbMap.containsKey(dbPath)) {
RefCountedDB refDB = dbMap.get(dbPath);
int count = refDB.close();
if (count == 0) {
dbMap.remove(dbPath);
}
}
}
Pipeline getPipeline(String dbPath, long blockId) {
Pipeline[] containerList = pipelineMap.get(dbPath);
Preconditions.checkNotNull(containerList);
int containerIdx = (int) (blockId % containerList.length);
long cBlockIndex =
Longs.fromByteArray(containerList[containerIdx].getData());
if (cBlockIndex > 0) {
// This catches the case when we get a wrong container in the ordering
// of the containers.
Preconditions.checkState(containerIdx % cBlockIndex == 0,
"The container ID computed should match with the container index " +
"returned from cBlock Server.");
}
return containerList[containerIdx];
}
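// Illustration of the block-to-container mapping above (values are
// hypothetical): with 4 containers in the registered pipeline array,
//   blockId 0 -> containerIdx 0      blockId 4 -> containerIdx 0
//   blockId 1 -> containerIdx 1      blockId 5 -> containerIdx 1
//   blockId 2 -> containerIdx 2      ...
//   blockId 3 -> containerIdx 3
// so consecutive blocks are striped round-robin across the ordered
// container list that register() stored for this dbPath.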
public void incFinishCount(String fileName) {
if (!finishCountMap.containsKey(fileName)) {
LOG.error("No record for such file:" + fileName);
return;
}
finishCountMap.get(fileName).incCount();
if (finishCountMap.get(fileName).isFileDeleted()) {
finishCountMap.remove(fileName);
}
}
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see Thread#run()
*/
@Override
public void run() {
while (!this.shutdown.get()) {
try {
Message message = messageQueue.take();
LOG.debug("Got message to process -- DB Path : {} , FileName; {}",
message.getDbPath(), message.getFileName());
String fullPath = Paths.get(message.getDbPath(),
message.getFileName()).toString();
String[] fileNameParts = message.getFileName().split("\\.");
Preconditions.checkState(fileNameParts.length > 1);
String fileType = fileNameParts[0];
boolean isDirtyLogFile =
fileType.equalsIgnoreCase(AsyncBlockWriter.DIRTY_LOG_PREFIX);
ReadableByteChannel fileChannel = new FileInputStream(fullPath)
.getChannel();
// TODO: We can batch and unique the IOs here. First getting the code
// to work, we will add those later.
int bytesRead = fileChannel.read(blockIDBuffer);
fileChannel.close();
LOG.debug("Read blockID log of size: {} position {} remaining {}",
bytesRead, blockIDBuffer.position(), blockIDBuffer.remaining());
// The current position in the buffer in bytes, divided by the number
// of bytes per long (bits per long divided by bits per byte), gives
// the number of blocks.
int blockCount = blockIDBuffer.position() / (Long.SIZE / Byte.SIZE);
if (isDirtyLogFile) {
getTargetMetrics().incNumBytesDirtyLogRead(bytesRead);
} else {
getTargetMetrics().incNumBytesRetryLogRead(bytesRead);
}
if (finishCountMap.containsKey(message.getFileName())) {
// In theory this should never happen. But if it happened,
// we need to know it...
getTargetMetrics().incNumIllegalDirtyLogFiles();
LOG.error("Adding DirtyLog file again {} current count {} new {}",
message.getFileName(),
finishCountMap.get(message.getFileName()).expectedCount,
blockCount);
}
finishCountMap.put(message.getFileName(),
new FinishCounter(blockCount, message.getDbPath(),
message.getFileName(), this));
// Use flip() rather than rewind(): flip() also sets the limit to the
// current position, marking the end of the valid data.
blockIDBuffer.flip();
LOG.debug("Remaining blocks count {} and {}", blockIDBuffer.remaining(),
blockCount);
while (blockIDBuffer.remaining() >= (Long.SIZE / Byte.SIZE)) {
long blockID = blockIDBuffer.getLong();
int retryCount = 0;
if (isDirtyLogFile) {
getTargetMetrics().incNumDirtyLogBlockRead();
} else {
getTargetMetrics().incNumRetryLogBlockRead();
Preconditions.checkState(fileNameParts.length == 4);
retryCount = Integer.parseInt(fileNameParts[3]);
}
LogicalBlock block = new DiskBlock(blockID, null, false);
BlockWriterTask blockWriterTask = new BlockWriterTask(block, this,
message.getDbPath(), retryCount, message.getFileName(),
maxRetryCount);
threadPoolExecutor.submit(blockWriterTask);
}
blockIDBuffer.clear();
} catch (InterruptedException e) {
LOG.info("ContainerCacheFlusher is interrupted.", e);
} catch (FileNotFoundException e) {
LOG.error("Unable to find the dirty blocks file. This will cause " +
"data errors. Please stop using this volume.", e);
} catch (IOException e) {
LOG.error("Unable to read the dirty blocks file. This will cause " +
"data errors. Please stop using this volume.", e);
} catch (Exception e) {
LOG.error("Generic exception.", e);
}
}
LOG.info("Exiting flusher");
}
/**
* Tries to get the local host IP Address as trace prefix
* for creating trace IDs, otherwise uses a random UUID for it.
*/
private static String getTracePrefix() {
String tmp;
try {
tmp = InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException ex) {
tmp = UUID.randomUUID().toString();
LOG.error("Unable to read the host address. Using a GUID for " +
"hostname:{} ", tmp, ex);
}
return tmp;
}
/**
* We create a trace ID to make it easy to debug issues.
* A trace ID is in IPAddress:UserName:VolumeName:blockID:second format.
*
* This will get written down on the data node if we get any failures, so
* with this trace ID we can correlate cBlock failures across machines.
*
* @param blockID - Block ID
* @return trace ID
*/
public String getTraceID(File dbPath, long blockID) {
String volumeName = dbPath.getName();
String userName = dbPath.getParentFile().getName();
// mapping to seconds to make the string smaller.
return tracePrefix + ":" + userName + ":" + volumeName
+ ":" + blockID + ":" + Time.monotonicNow() / 1000;
}
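// Example trace ID (values hypothetical): a write to block 42 of
// volume "vol1" owned by "user1", issued from host 10.0.0.5 at second
// 1523456789, would be tagged:
//
//   10.0.0.5:user1:vol1:42:1523456789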
/**
* Keeps a Reference counted DB that we close only when the total Reference
* has gone to zero.
*/
private static class RefCountedDB {
private LevelDBStore db;
private AtomicInteger refcount;
private String dbPath;
/**
* RefCountedDB DB ctor.
*
* @param dbPath - DB path.
* @param db - LevelDBStore db
*/
RefCountedDB(String dbPath, LevelDBStore db) {
this.db = db;
this.refcount = new AtomicInteger(1);
this.dbPath = dbPath;
}
/**
* close the DB if possible.
*/
public int close() throws IOException {
int count = this.refcount.decrementAndGet();
if (count == 0) {
LOG.info("Closing the LevelDB. {} ", this.dbPath);
db.close();
}
return count;
}
public void open() {
this.refcount.incrementAndGet();
}
}
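// Sketch of the reference-counting discipline that openDB()/closeDB()
// build on RefCountedDB (paths are hypothetical):
//
//   flusher.openDB("/cache/user1/vol1");   // creates DB, refcount = 1
//   flusher.openDB("/cache/user1/vol1");   // same handle, refcount = 2
//   flusher.closeDB("/cache/user1/vol1");  // refcount = 1, DB stays open
//   flusher.closeDB("/cache/user1/vol1");  // refcount = 0, LevelDB closed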
/**
* The message held in processing queue.
*/
private static class Message {
private String dbPath;
private String fileName;
/**
* A message that holds the path of the dirty-blocks log file and the
* path that contains the db.
*
* @param dbPath
* @param fileName
*/
Message(String dbPath, String fileName) {
this.dbPath = dbPath;
this.fileName = fileName;
}
public String getDbPath() {
return dbPath;
}
public void setDbPath(String dbPath) {
this.dbPath = dbPath;
}
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
}
private static class FinishCounter {
private final long expectedCount;
private final String dbPath;
private final String dirtyLogPath;
private final AtomicLong currentCount;
private AtomicBoolean fileDeleted;
private final ContainerCacheFlusher flusher;
FinishCounter(long expectedCount, String dbPath,
String dirtyLogPath, ContainerCacheFlusher flusher) throws IOException {
this.expectedCount = expectedCount;
this.dbPath = dbPath;
this.dirtyLogPath = dirtyLogPath;
this.currentCount = new AtomicLong(0);
this.fileDeleted = new AtomicBoolean(false);
this.flusher = flusher;
}
public boolean isFileDeleted() {
return fileDeleted.get();
}
public void incCount() {
long count = this.currentCount.incrementAndGet();
if (count >= expectedCount) {
String filePath = String.format("%s/%s", dbPath, dirtyLogPath);
LOG.debug(
"Deleting {} with count {} {}", filePath, count, expectedCount);
try {
Path path = Paths.get(filePath);
Files.delete(path);
// the following part tries to remove the directory if it is empty
// but not sufficient, because the .db directory still exists....
// TODO how to handle the .db directory?
/*Path parent = path.getParent();
if (parent.toFile().listFiles().length == 0) {
Files.delete(parent);
}*/
fileDeleted.set(true);
} catch (Exception e) {
flusher.getTargetMetrics().incNumFailedDirtyLogFileDeletes();
LOG.error("Error deleting dirty log file:" + filePath, e);
}
}
}
}
}

View File

@ -1,132 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_IP;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_PORT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT;
import org.apache.hadoop.cblock.CblockUtils;
import org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.security.UserGroupInformation;
import org.jscsi.target.Configuration;
import java.net.InetSocketAddress;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_CONTAINER_SIZE_GB_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
/**
* This class runs the target server process.
*/
public final class SCSITargetDaemon {
public static void main(String[] args) throws Exception {
CblockUtils.activateConfigs();
OzoneConfiguration ozoneConf = new OzoneConfiguration();
RPC.setProtocolEngine(ozoneConf, CBlockClientServerProtocolPB.class,
ProtobufRpcEngine.class);
long containerSizeGB = ozoneConf.getInt(DFS_CBLOCK_CONTAINER_SIZE_GB_KEY,
DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT);
ContainerOperationClient.setContainerSizeB(
containerSizeGB * OzoneConsts.GB);
String jscsiServerAddress = ozoneConf.get(
DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY,
DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT);
String cbmIPAddress = ozoneConf.get(
DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_KEY,
DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_DEFAULT
);
int cbmPort = ozoneConf.getInt(
DFS_CBLOCK_JSCSI_PORT_KEY,
DFS_CBLOCK_JSCSI_PORT_DEFAULT
);
String scmAddress = ozoneConf.get(OZONE_SCM_CLIENT_BIND_HOST_KEY,
OZONE_SCM_CLIENT_BIND_HOST_DEFAULT);
int scmClientPort = ozoneConf.getInt(OZONE_SCM_CLIENT_PORT_KEY,
OZONE_SCM_CLIENT_PORT_DEFAULT);
int scmDatanodePort = ozoneConf.getInt(OZONE_SCM_DATANODE_PORT_KEY,
OZONE_SCM_DATANODE_PORT_DEFAULT);
String scmClientAddress = scmAddress + ":" + scmClientPort;
String scmDatanodeAddress = scmAddress + ":" + scmDatanodePort;
ozoneConf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmClientAddress);
ozoneConf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmDatanodeAddress);
InetSocketAddress cbmAddress = new InetSocketAddress(
cbmIPAddress, cbmPort);
long version = RPC.getProtocolVersion(
CBlockClientServerProtocolPB.class);
CBlockClientProtocolClientSideTranslatorPB cbmClient =
new CBlockClientProtocolClientSideTranslatorPB(
RPC.getProxy(CBlockClientServerProtocolPB.class, version,
cbmAddress, UserGroupInformation.getCurrentUser(), ozoneConf,
NetUtils.getDefaultSocketFactory(ozoneConf), 5000)
);
CBlockManagerHandler cbmHandler = new CBlockManagerHandler(cbmClient);
String advertisedAddress = ozoneConf.getTrimmed(
DFS_CBLOCK_ISCSI_ADVERTISED_IP, jscsiServerAddress);
int advertisedPort = ozoneConf.getInt(
DFS_CBLOCK_ISCSI_ADVERTISED_PORT,
DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT);
Configuration jscsiConfig =
new Configuration(jscsiServerAddress,
advertisedAddress,
advertisedPort);
DefaultMetricsSystem.initialize("CBlockMetrics");
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
CBlockTargetServer targetServer = new CBlockTargetServer(
ozoneConf, jscsiConfig, cbmHandler, metrics);
targetServer.call();
}
private SCSITargetDaemon() {
}
}

View File

@ -1,52 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache;
import java.io.IOException;
/**
* Defines the interface for cache implementations. The cache is called
* by the cblock storage module when it performs IO operations.
*/
public interface CacheModule {
/**
* Checks if the block is cached; if yes, returns the cached object.
* Otherwise, loads it from the data source, then puts it into the cache.
*
* @param blockID
* @return the target block.
*/
LogicalBlock get(long blockID) throws IOException;
/**
* Puts the value for the given block into the cache.
* @param blockID
* @param value
*/
void put(long blockID, byte[] value) throws IOException;
void flush() throws IOException;
void start() throws IOException;
void stop() throws IOException;
void close() throws IOException;
boolean isDirtyCache();
}
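// A minimal in-memory sketch of this contract, for illustration only
// (the real implementation is CBlockLocalCache, which persists blocks
// in LevelDB and flushes them to containers; this toy version does
// neither):
class InMemoryCacheModule implements CacheModule {
  private final java.util.concurrent.ConcurrentHashMap<Long, byte[]>
      blocks = new java.util.concurrent.ConcurrentHashMap<>();
  private final int blockSize;

  InMemoryCacheModule(int blockSize) {
    this.blockSize = blockSize;
  }

  @Override
  public LogicalBlock get(final long blockID) {
    // Missing blocks read back zero-filled, matching the real cache.
    final byte[] data = blocks.getOrDefault(blockID, new byte[blockSize]);
    return new LogicalBlock() {
      @Override
      public java.nio.ByteBuffer getData() {
        return java.nio.ByteBuffer.wrap(data);
      }
      @Override
      public void clearData() { }
      @Override
      public long getBlockID() {
        return blockID;
      }
      @Override
      public boolean isPersisted() {
        return true;
      }
    };
  }

  @Override
  public void put(long blockID, byte[] value) {
    blocks.put(blockID, value.clone());
  }

  @Override
  public void flush() { }

  @Override
  public void start() { }

  @Override
  public void stop() { }

  @Override
  public void close() {
    blocks.clear();
  }

  @Override
  public boolean isDirtyCache() {
    return false;
  }
}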

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache;
import java.nio.ByteBuffer;
/**
* Logical Block is the data structure that we write to the cache;
* the key and data get written to remote containers. The rest is used
* for bookkeeping in the cache.
*/
public interface LogicalBlock {
/**
* Returns the data stream of this block.
* @return - ByteBuffer
*/
ByteBuffer getData();
/**
* Frees the byte buffer since we don't need it any more.
*/
void clearData();
/**
* Returns the Block ID for this Block.
* @return long - BlockID
*/
long getBlockID();
/**
* Flag that tells us if this block has been persisted to container.
* @return whether this block is now persistent
*/
boolean isPersisted();
}

View File

@ -1,221 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* A Queue that is used to write blocks asynchronously to the container.
*/
public class AsyncBlockWriter {
private static final Logger LOG =
LoggerFactory.getLogger(AsyncBlockWriter.class);
/**
* XceiverClientManager is used to get client connections to a set of
* machines.
*/
private final XceiverClientManager xceiverClientManager;
/**
* This lock is used as a signal to the re-queuing thread. The requeue thread
* wakes up as soon as it is signaled some blocks are in the retry queue.
* We try really aggressively since this new block will automatically move
* to the end of the queue.
* <p>
* In the event a container is unavailable for a long time, we can either
* fail all writes or remap and let the writes succeed. The easier
* semantics is to fail the volume until the container is recovered by SCM.
*/
private final Lock lock;
private final Condition notEmpty;
/**
* The cache this writer is operating against.
*/
private final CBlockLocalCache parentCache;
private final BlockBufferManager blockBufferManager;
public static final String DIRTY_LOG_PREFIX = "DirtyLog";
public static final String RETRY_LOG_PREFIX = "RetryLog";
private AtomicLong localIoCount;
/**
* Constructs an Async Block Writer.
*
* @param config - Config
* @param cache - Parent Cache for this writer
*/
public AsyncBlockWriter(Configuration config, CBlockLocalCache cache) {
Preconditions.checkNotNull(cache, "Cache cannot be null.");
Preconditions.checkNotNull(cache.getCacheDB(), "DB cannot be null.");
localIoCount = new AtomicLong();
lock = new ReentrantLock();
notEmpty = lock.newCondition();
parentCache = cache;
xceiverClientManager = cache.getClientManager();
blockBufferManager = new BlockBufferManager(config, parentCache);
}
public void start() throws IOException {
File logDir = new File(parentCache.getDbPath().toString());
if (!logDir.exists() && !logDir.mkdirs()) {
LOG.error("Unable to create the log directory, Critical error cannot " +
"continue. Log Dir : {}", logDir);
throw new IllegalStateException("Cache Directory create failed, Cannot " +
"continue. Log Dir: {}" + logDir);
}
blockBufferManager.start();
}
/**
* Return the log to write to.
*
* @return Logger.
*/
public static Logger getLOG() {
return LOG;
}
/**
* Get the CacheDB.
*
* @return LevelDB Handle
*/
LevelDBStore getCacheDB() {
return parentCache.getCacheDB();
}
/**
* Returns the client manager.
*
* @return XceiverClientManager
*/
XceiverClientManager getXceiverClientManager() {
return xceiverClientManager;
}
/**
* Incs the localIoPacket Count that has gone into this device.
*/
public long incrementLocalIO() {
return localIoCount.incrementAndGet();
}
/**
* Returns the local I/O count for this device.
* @return the count of I/O operations
*/
public long getLocalIOCount() {
return localIoCount.get();
}
/**
* Writes a block to LevelDB store and queues a work item for the system to
* sync the block to containers.
*
* @param block - Logical Block
*/
public void writeBlock(LogicalBlock block) throws IOException {
byte[] keybuf = Longs.toByteArray(block.getBlockID());
String traceID = parentCache.getTraceID(block.getBlockID());
if (parentCache.isShortCircuitIOEnabled()) {
long startTime = Time.monotonicNow();
getCacheDB().put(keybuf, block.getData().array());
incrementLocalIO();
long endTime = Time.monotonicNow();
parentCache.getTargetMetrics().updateDBWriteLatency(
endTime - startTime);
if (parentCache.isTraceEnabled()) {
String datahash = DigestUtils.sha256Hex(block.getData().array());
parentCache.getTracer().info(
"Task=WriterTaskDBPut,BlockID={},Time={},SHA={}",
block.getBlockID(), endTime - startTime, datahash);
}
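// The data is now safe in the local LevelDB store; drop the in-memory
// copy and only remember the block ID so the buffer manager can sync it
// to the container later.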
block.clearData();
blockBufferManager.addToBlockBuffer(block.getBlockID());
} else {
Pipeline pipeline = parentCache.getPipeline(block.getBlockID());
String containerName = pipeline.getContainerName();
XceiverClientSpi client = null;
try {
long startTime = Time.monotonicNow();
client = parentCache.getClientManager()
.acquireClient(parentCache.getPipeline(block.getBlockID()));
ContainerProtocolCalls.writeSmallFile(client, containerName,
Long.toString(block.getBlockID()), block.getData().array(),
traceID);
long endTime = Time.monotonicNow();
if (parentCache.isTraceEnabled()) {
String datahash = DigestUtils.sha256Hex(block.getData().array());
parentCache.getTracer().info(
"Task=DirectWriterPut,BlockID={},Time={},SHA={}",
block.getBlockID(), endTime - startTime, datahash);
}
parentCache.getTargetMetrics().
updateDirectBlockWriteLatency(endTime - startTime);
parentCache.getTargetMetrics().incNumDirectBlockWrites();
} catch (Exception ex) {
parentCache.getTargetMetrics().incNumFailedDirectBlockWrites();
LOG.error("Direct I/O writing of block:{} traceID:{} to "
+ "container {} failed", block.getBlockID(), traceID,
containerName, ex);
throw ex;
} finally {
if (client != null) {
parentCache.getClientManager().releaseClient(client);
}
block.clearData();
}
}
}
/**
* Shutdown by writing any pending I/O to dirtylog buffer.
*/
public void shutdown() {
blockBufferManager.shutdown();
}
/**
* Returns tracer.
*
* @return Tracer
*/
Logger getTracer() {
return parentCache.getTracer();
}
}
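/*
 * A minimal, self-contained sketch of the write-behind pattern that
 * AsyncBlockWriter#writeBlock uses on its short-circuit path: persist the
 * block to a local store first, then queue only the block ID for a later
 * asynchronous sync to the remote container. All names here
 * (WriteBehindSketch, localStore, dirtyIds) are illustrative stand-ins,
 * not the CBlock API.
 */
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;

public class WriteBehindSketch {
  // Stands in for the LevelDB cache.
  private final ConcurrentMap<Long, byte[]> localStore = new ConcurrentHashMap<>();
  // Stands in for the block ID buffer handed to the flusher.
  private final LinkedBlockingQueue<Long> dirtyIds = new LinkedBlockingQueue<>();

  void writeBlock(long blockId, byte[] data) throws InterruptedException {
    localStore.put(blockId, data); // cheap local write; the caller returns here
    dirtyIds.put(blockId);         // remember the ID for the background sync
  }

  public static void main(String[] args) throws Exception {
    WriteBehindSketch writer = new WriteBehindSketch();
    writer.writeBlock(42L, new byte[]{1, 2, 3});
    System.out.println("queued dirty block: " + writer.dirtyIds.take());
  }
}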

View File

@ -1,118 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
/**
* This task is responsible for flushing the BlockIDBuffer
* to the Dirty Log file. The Dirty Log file is used later by the
* ContainerCacheFlusher when the data is written to the container.
*/
public class BlockBufferFlushTask implements Runnable {
private static final Logger LOG =
LoggerFactory.getLogger(BlockBufferFlushTask.class);
private final CBlockLocalCache parentCache;
private final BlockBufferManager bufferManager;
private final ByteBuffer blockIDBuffer;
BlockBufferFlushTask(ByteBuffer blockIDBuffer, CBlockLocalCache parentCache,
BlockBufferManager manager) {
this.parentCache = parentCache;
this.bufferManager = manager;
this.blockIDBuffer = blockIDBuffer;
}
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see Thread#run()
*/
@Override
public void run() {
try {
writeBlockBufferToFile(blockIDBuffer);
} catch (Exception e) {
parentCache.getTargetMetrics().incNumFailedBlockBufferFlushes();
LOG.error("Unable to sync the Block map to disk with "
+ (blockIDBuffer.position() / Long.SIZE) + "entries "
+ "-- NOTE: This might cause a data loss or corruption", e);
} finally {
bufferManager.releaseBuffer(blockIDBuffer);
}
}
/**
* Write Block Buffer to file.
*
* @param buffer - ByteBuffer
* @throws IOException
*/
private void writeBlockBufferToFile(ByteBuffer buffer)
throws IOException {
long startTime = Time.monotonicNow();
boolean append = false;
// If there is nothing written to blockId buffer,
// then skip flushing of blockId buffer
if (buffer.position() == 0) {
return;
}
buffer.flip();
String fileName =
String.format("%s.%s", AsyncBlockWriter.DIRTY_LOG_PREFIX,
Time.monotonicNow());
String log = Paths.get(parentCache.getDbPath().toString(), fileName)
.toString();
int bytesWritten;
// try-with-resources ensures the channel is closed even if the write fails
try (FileChannel channel = new FileOutputStream(log, append).getChannel()) {
bytesWritten = channel.write(buffer);
}
buffer.clear();
parentCache.processDirtyMessage(fileName);
long endTime = Time.monotonicNow();
if (parentCache.isTraceEnabled()) {
parentCache.getTracer().info(
"Task=DirtyBlockLogWrite,Time={} bytesWritten={}",
endTime - startTime, bytesWritten);
}
parentCache.getTargetMetrics().incNumBlockBufferFlushCompleted();
parentCache.getTargetMetrics().incNumBytesDirtyLogWritten(bytesWritten);
parentCache.getTargetMetrics().
updateBlockBufferFlushLatency(endTime - startTime);
LOG.debug("Block buffer writer bytesWritten:{} Time:{}",
bytesWritten, endTime - startTime);
}
}
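/*
 * A standalone sketch of the dirty-log format produced above: the file is
 * a flat sequence of 8-byte block IDs, which a reader (the
 * ContainerCacheFlusher in CBlock) can recover with getLong(). The temp
 * file name is illustrative.
 */
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;

public class DirtyLogSketch {
  public static void main(String[] args) throws IOException {
    ByteBuffer ids = ByteBuffer.allocate(3 * Long.BYTES);
    ids.putLong(7L).putLong(8L).putLong(9L);
    ids.flip(); // switch from writing the buffer to reading it
    File log = File.createTempFile("DirtyLog", ".bin");
    try (FileChannel channel = new FileOutputStream(log).getChannel()) {
      System.out.println("bytes written: " + channel.write(ids));
    }
    // Recover the IDs the same way the flusher would.
    ByteBuffer back = ByteBuffer.wrap(Files.readAllBytes(log.toPath()));
    while (back.hasRemaining()) {
      System.out.println("block id: " + back.getLong());
    }
  }
}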

View File

@ -1,184 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_KEEP_ALIVE;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_THREAD_PRIORITY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT;
/**
* This class manages the block ID buffer.
* The block ID buffer keeps a list of the blocks which are in the leveldb
* cache. This buffer is used later when the blocks are flushed to the
* container.
*
* Two blockIDBuffers are maintained so that writes are not blocked while
* the DirtyLog is being written. Once a blockIDBuffer is full, it is
* enqueued for a DirtyLog write while the other buffer accepts new writes.
* Once the DirtyLog write is done, the buffer is returned to the pool.
*
* There are three triggers for a blockIDBuffer flush:
* 1) The blockIDBuffer is full.
* 2) The time period defined for the blockIDBuffer flush has elapsed.
* 3) Shutdown.
*/
public class BlockBufferManager {
private static final Logger LOG =
LoggerFactory.getLogger(BlockBufferManager.class);
private enum FlushReason {
BUFFER_FULL,
SHUTDOWN,
TIMER
};
private final int blockBufferSize;
private final CBlockLocalCache parentCache;
private final ScheduledThreadPoolExecutor scheduledExecutor;
private final ThreadPoolExecutor threadPoolExecutor;
private final long intervalSeconds;
private final ArrayBlockingQueue<ByteBuffer> acquireQueue;
private final ArrayBlockingQueue<Runnable> workQueue;
private ByteBuffer currentBuffer;
BlockBufferManager(Configuration config, CBlockLocalCache parentCache) {
this.parentCache = parentCache;
this.scheduledExecutor = new ScheduledThreadPoolExecutor(1);
this.intervalSeconds =
config.getTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT,
TimeUnit.SECONDS);
long keepAlive = config.getTimeDuration(DFS_CBLOCK_CACHE_KEEP_ALIVE,
DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT,
TimeUnit.SECONDS);
this.workQueue = new ArrayBlockingQueue<>(2, true);
int threadPri = config.getInt(DFS_CBLOCK_CACHE_THREAD_PRIORITY,
DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT);
ThreadFactory workerThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("Cache Block Buffer Manager Thread #%d")
.setDaemon(true)
.setPriority(threadPri)
.build();
/*
* Start a thread pool with a core pool size of 1 and a maximum of 2
* threads, as there are at most 2 buffers which can be flushed at the
* same time.
*/
this.threadPoolExecutor = new ThreadPoolExecutor(1, 2,
keepAlive, TimeUnit.SECONDS, workQueue, workerThreadFactory,
new ThreadPoolExecutor.AbortPolicy());
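// The configured value is a count of block IDs; multiplying by
// (Long.SIZE / Byte.SIZE) == 8 converts it to a size in bytes, since each
// buffer entry is one 8-byte block ID.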
this.blockBufferSize = config.getInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE,
DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT) * (Long.SIZE / Byte.SIZE);
this.acquireQueue = new ArrayBlockingQueue<>(2, true);
for (int i = 0; i < 2; i++) {
acquireQueue.add(ByteBuffer.allocate(blockBufferSize));
}
// get the first buffer to be used
this.currentBuffer = acquireQueue.remove();
LOG.info("BufferManager: Buffer Size:{} FlushIntervalSeconds:{}",
blockBufferSize, intervalSeconds);
}
// triggerBlockBufferFlush enqueues the current ByteBuffer for flush and
// returns. The enqueue is asynchronous, so triggerBlockBufferFlush only
// blocks when there are no available buffers in the acquireQueue. Once
// the DirtyLog write is done, the buffer is returned to the
// BlockBufferManager via releaseBuffer.
private synchronized void triggerBlockBufferFlush(FlushReason reason) {
LOG.debug("Flush triggered because: " + reason.toString() +
" Num entries in buffer: " +
currentBuffer.position() / (Long.SIZE / Byte.SIZE) +
" Acquire Queue Size: " + acquireQueue.size());
parentCache.getTargetMetrics().incNumBlockBufferFlushTriggered();
BlockBufferFlushTask flushTask =
new BlockBufferFlushTask(currentBuffer, parentCache, this);
threadPoolExecutor.submit(flushTask);
try {
currentBuffer = acquireQueue.take();
} catch (InterruptedException ex) {
currentBuffer = null;
parentCache.getTargetMetrics().incNumInterruptedBufferWaits();
LOG.error("wait on take operation on acquire queue interrupted", ex);
Thread.currentThread().interrupt();
}
}
public synchronized void addToBlockBuffer(long blockId) {
parentCache.getTargetMetrics().incNumBlockBufferUpdates();
currentBuffer.putLong(blockId);
// if no space left, flush this buffer
if (currentBuffer.remaining() == 0) {
triggerBlockBufferFlush(FlushReason.BUFFER_FULL);
}
}
public void releaseBuffer(ByteBuffer buffer) {
if (buffer.position() != 0) {
LOG.error("requeuing a non empty buffer with:{}",
"elements enqueued in the acquire queue",
buffer.position() / (Long.SIZE / Byte.SIZE));
buffer.reset();
}
// There should always be space in the queue to add an element
acquireQueue.add(buffer);
}
// Start a scheduled task to flush blockIDBuffer
public void start() {
Runnable scheduledTask = () -> triggerBlockBufferFlush(FlushReason.TIMER);
scheduledExecutor.scheduleWithFixedDelay(scheduledTask, intervalSeconds,
intervalSeconds, TimeUnit.SECONDS);
threadPoolExecutor.prestartAllCoreThreads();
}
public void shutdown() {
triggerBlockBufferFlush(FlushReason.SHUTDOWN);
scheduledExecutor.shutdown();
threadPoolExecutor.shutdown();
}
}
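/*
 * A minimal sketch of the two-buffer rotation implemented above: one
 * buffer accepts writes while a full one is flushed, and the flusher
 * returns buffers to the pool when done. Names (DoubleBufferSketch, pool,
 * current) are illustrative, not the CBlock API.
 */
import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;

public class DoubleBufferSketch {
  private final ArrayBlockingQueue<ByteBuffer> pool = new ArrayBlockingQueue<>(2);
  private ByteBuffer current;

  DoubleBufferSketch(int entriesPerBuffer) {
    pool.add(ByteBuffer.allocate(entriesPerBuffer * Long.BYTES));
    pool.add(ByteBuffer.allocate(entriesPerBuffer * Long.BYTES));
    current = pool.remove(); // first buffer to accept writes
  }

  synchronized void add(long blockId) throws InterruptedException {
    current.putLong(blockId);
    if (current.remaining() == 0) { // buffer full: rotate and flush
      ByteBuffer full = current;
      current = pool.take();        // blocks only while both buffers are in flight
      flushAsync(full);
    }
  }

  private void flushAsync(ByteBuffer full) {
    new Thread(() -> {
      full.flip();
      System.out.println("flushing " + (full.limit() / Long.BYTES) + " block ids");
      full.clear();
      pool.add(full); // return the buffer to the pool for reuse
    }).start();
  }

  public static void main(String[] args) throws Exception {
    DoubleBufferSketch buffers = new DoubleBufferSketch(2);
    for (long i = 0; i < 6; i++) {
      buffers.add(i); // triggers a flush after every second ID
    }
  }
}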

View File

@ -1,577 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.utils.LevelDBStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_DISK_CACHE_PATH_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_DISK_CACHE_PATH_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_TRACE_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_TRACE_IO_DEFAULT;
/**
* A local cache used by the CBlock ISCSI server. This class is enabled or
* disabled via config settings.
*/
public class CBlockLocalCache implements CacheModule {
private static final Logger LOG =
LoggerFactory.getLogger(CBlockLocalCache.class);
private static final Logger TRACER =
LoggerFactory.getLogger("TraceIO");
private final Configuration conf;
/**
* LevelDB cache file.
*/
private final LevelDBStore cacheDB;
/**
* The async block writer updates the cacheDB and writes the blocks
* asynchronously to remote containers.
*/
private final AsyncBlockWriter blockWriter;
/**
* The sync block reader tries to read from the cache, and if we get a
* cache miss we fetch the block from the remote location. It then
* asynchronously updates the cacheDB.
*/
private final SyncBlockReader blockReader;
private final String userName;
private final String volumeName;
/**
* From a block ID we are able to get the pipeline by indexing this array.
*/
private final Pipeline[] containerList;
private final int blockSize;
private XceiverClientManager clientManager;
/**
* If this flag is enabled then the cache traces all I/O: all reads and
* writes are visible in the log along with the SHA of each block written.
* This makes the system slower, so use it only for debugging or for
* creating trace simulations.
*/
private final boolean traceEnabled;
private final boolean enableShortCircuitIO;
private final long volumeSize;
private long currentCacheSize;
private File dbPath;
private final ContainerCacheFlusher flusher;
private CBlockTargetMetrics cblockTargetMetrics;
/**
* Get Db Path.
* @return the file instance of the db.
*/
public File getDbPath() {
return dbPath;
}
/**
* Constructor for CBlockLocalCache invoked via the builder.
*
* @param conf - Configuration
* @param volumeName - volume Name
* @param userName - user name
* @param containerPipelines - Pipelines that make up this container
* @param blockSize - blockSize
* @param flusher - flusher to flush data to container
* @throws IOException
*/
CBlockLocalCache(
Configuration conf, String volumeName,
String userName, List<Pipeline> containerPipelines, int blockSize,
long volumeSize, ContainerCacheFlusher flusher) throws IOException {
this.conf = conf;
this.userName = userName;
this.volumeName = volumeName;
this.blockSize = blockSize;
this.flusher = flusher;
this.traceEnabled = conf.getBoolean(DFS_CBLOCK_TRACE_IO,
DFS_CBLOCK_TRACE_IO_DEFAULT);
this.enableShortCircuitIO = conf.getBoolean(
DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO,
DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO_DEFAULT);
dbPath = Paths.get(conf.get(DFS_CBLOCK_DISK_CACHE_PATH_KEY,
DFS_CBLOCK_DISK_CACHE_PATH_DEFAULT), userName, volumeName).toFile();
if (!dbPath.exists() && !dbPath.mkdirs()) {
LOG.error("Unable to create the cache paths. Path: {}", dbPath);
throw new IllegalArgumentException("Unable to create paths. Path: " +
dbPath);
}
cacheDB = flusher.getCacheDB(dbPath.toString());
this.containerList = containerPipelines.toArray(new
Pipeline[containerPipelines.size()]);
this.volumeSize = volumeSize;
blockWriter = new AsyncBlockWriter(conf, this);
blockReader = new SyncBlockReader(conf, this);
if (this.traceEnabled) {
getTracer().info("Task=StartingCache");
}
}
private void setClientManager(XceiverClientManager manager) {
this.clientManager = manager;
}
private void setCblockTargetMetrics(CBlockTargetMetrics targetMetrics) {
this.cblockTargetMetrics = targetMetrics;
}
/**
* Returns new builder class that builds a CBlockLocalCache.
*
* @return Builder
*/
public static Builder newBuilder() {
return new Builder();
}
public void processDirtyMessage(String fileName) {
flusher.processDirtyBlocks(dbPath.toString(), fileName);
}
/**
* Get usable disk space.
*
* @param dbPathString - Path to db
* @return long bytes remaining.
*/
private static long getRemainingDiskSpace(String dbPathString) {
try {
URI fileUri = new URI("file:///");
Path dbPath = Paths.get(fileUri).resolve(dbPathString);
FileStore disk = Files.getFileStore(dbPath);
return disk.getUsableSpace();
} catch (URISyntaxException | IOException ex) {
LOG.error("Unable to get free space on for path :" + dbPathString);
}
return 0L;
}
/**
* Returns the current maximum cache size.
*
* @return - Cache Size
*/
public long getCurrentCacheSize() {
return currentCacheSize;
}
/**
* Sets the maximum cache size.
*
* @param currentCacheSize - the maximum cache size.
*/
public void setCurrentCacheSize(long currentCacheSize) {
this.currentCacheSize = currentCacheSize;
}
/**
* True if block tracing is enabled.
*
* @return - bool
*/
public boolean isTraceEnabled() {
return traceEnabled;
}
/**
* Checks if Short Circuit I/O is enabled.
*
* @return - true if it is enabled.
*/
public boolean isShortCircuitIOEnabled() {
return enableShortCircuitIO;
}
/**
* Returns the default block size of this device.
*
* @return - int
*/
public int getBlockSize() {
return blockSize;
}
/**
* Gets the client manager.
*
* @return XceiverClientManager
*/
public XceiverClientManager getClientManager() {
return clientManager;
}
/**
* Checks if the key is cached; if yes, returns the cached object.
* Otherwise, loads it from the data source and puts it into the cache.
*
* @param blockID - block ID
* @return the block associated with the blockID
*/
@Override
public LogicalBlock get(long blockID) throws IOException {
cblockTargetMetrics.incNumReadOps();
return blockReader.readBlock(blockID);
}
/**
* Puts the value of the key into the cache and the remote container.
*
* @param blockID - BlockID
* @param data - byte[]
*/
@Override
public void put(long blockID, byte[] data) throws IOException {
cblockTargetMetrics.incNumWriteOps();
LogicalBlock block = new DiskBlock(blockID, data, false);
blockWriter.writeBlock(block);
}
@Override
public void flush() throws IOException {
}
@Override
public void start() throws IOException {
flusher.register(getDbPath().getPath(), containerList);
blockWriter.start();
}
@Override
public void stop() throws IOException {
}
@Override
public void close() throws IOException {
blockReader.shutdown();
blockWriter.shutdown();
this.flusher.releaseCacheDB(dbPath.toString());
if (this.traceEnabled) {
getTracer().info("Task=ShutdownCache");
}
}
/**
* Returns true if cache still has blocks pending to write.
*
* @return false if we have no pending blocks to write.
*/
@Override
public boolean isDirtyCache() {
return false;
}
/**
* Returns the local cache DB.
*
* @return - DB
*/
LevelDBStore getCacheDB() {
return this.cacheDB;
}
/**
* Returns the current userName.
*
* @return - UserName
*/
String getUserName() {
return this.userName;
}
/**
* Returns the volume name.
*
* @return VolumeName.
*/
String getVolumeName() {
return this.volumeName;
}
/**
* Returns the target metrics.
*
* @return CBlock Target Metrics.
*/
CBlockTargetMetrics getTargetMetrics() {
return this.cblockTargetMetrics;
}
/**
* Returns the pipeline to use given a container.
*
* @param blockId - blockID
* @return - pipeline.
*/
Pipeline getPipeline(long blockId) {
int containerIdx = (int) (blockId % containerList.length);
long cBlockIndex =
Longs.fromByteArray(containerList[containerIdx].getData());
if (cBlockIndex > 0) {
// This catches the case when we get a wrong container in the ordering
// of the containers.
Preconditions.checkState(containerIdx % cBlockIndex == 0,
"The container ID computed should match with the container index " +
"returned from cBlock Server.");
}
return containerList[containerIdx];
}
String getTraceID(long blockID) {
return flusher.getTraceID(dbPath, blockID);
}
/**
* Returns tracer.
*
* @return - Logger
*/
Logger getTracer() {
return TRACER;
}
/**
* Builder class for CBlocklocalCache.
*/
public static class Builder {
private Configuration configuration;
private String userName;
private String volumeName;
private List<Pipeline> pipelines;
private XceiverClientManager clientManager;
private int blockSize;
private long volumeSize;
private ContainerCacheFlusher flusher;
private CBlockTargetMetrics metrics;
/**
* Ctor.
*/
Builder() {
}
/**
* Computes a cache size based on the configuration and available disk
* space.
*
* @param configuration - Config
* @param volumeSize - Size of Volume
* @param blockSize - Size of the block
* @return - cache size in bytes.
*/
private static long computeCacheSize(Configuration configuration,
long volumeSize, int blockSize) {
long cacheSize = 0;
String dbPath = configuration.get(DFS_CBLOCK_DISK_CACHE_PATH_KEY,
DFS_CBLOCK_DISK_CACHE_PATH_DEFAULT);
if (StringUtils.isBlank(dbPath)) {
return cacheSize;
}
long spaceRemaining = getRemainingDiskSpace(dbPath);
double cacheRatio = 1.0;
if (spaceRemaining < volumeSize) {
cacheRatio = (double)spaceRemaining / volumeSize;
}
// if cache is going to be at least 10% of the volume size it is worth
// doing, otherwise skip creating the cache.
if (cacheRatio >= 0.10) {
cacheSize = (long) (volumeSize * cacheRatio);
}
return cacheSize;
}
/**
* Sets the Config to be used by this cache.
*
* @param conf - Config
* @return Builder
*/
public Builder setConfiguration(Configuration conf) {
this.configuration = conf;
return this;
}
/**
* Sets the user name who is the owner of this volume.
*
* @param user - name of the owner, please note this is not the current
* user name.
* @return - Builder
*/
public Builder setUserName(String user) {
this.userName = user;
return this;
}
/**
* Sets the VolumeName.
*
* @param volume - Name of the volume
* @return Builder
*/
public Builder setVolumeName(String volume) {
this.volumeName = volume;
return this;
}
/**
* Sets the Pipelines that form this volume.
*
* @param pipelineList - list of pipelines
* @return Builder
*/
public Builder setPipelines(List<Pipeline> pipelineList) {
this.pipelines = pipelineList;
return this;
}
/**
* Sets the Client Manager that manages the communication with containers.
*
* @param xceiverClientManager - clientManager.
* @return - Builder
*/
public Builder setClientManager(XceiverClientManager xceiverClientManager) {
this.clientManager = xceiverClientManager;
return this;
}
/**
* Sets the block size -- Typical sizes are 4KB, 8KB etc.
*
* @param size - BlockSize.
* @return - Builder
*/
public Builder setBlockSize(int size) {
this.blockSize = size;
return this;
}
/**
* Sets the volumeSize.
*
* @param size - VolumeSize
* @return - Builder
*/
public Builder setVolumeSize(long size) {
this.volumeSize = size;
return this;
}
/**
* Set flusher.
* @param containerCacheFlusher - cache Flusher
* @return Builder.
*/
public Builder setFlusher(ContainerCacheFlusher containerCacheFlusher) {
this.flusher = containerCacheFlusher;
return this;
}
/**
* Sets the cblock Metrics.
*
* @param targetMetrics - CBlock Target Metrics
* @return - Builder
*/
public Builder setCBlockTargetMetrics(CBlockTargetMetrics targetMetrics) {
this.metrics = targetMetrics;
return this;
}
/**
* Constructs a CBlockLocalCache.
*
* @return the CBlockLocalCache with the preset properties.
* @throws IOException
*/
public CBlockLocalCache build() throws IOException {
Preconditions.checkNotNull(this.configuration, "A valid configuration " +
"is needed");
Preconditions.checkState(StringUtils.isNotBlank(userName), "A valid " +
"username is needed");
Preconditions.checkState(StringUtils.isNotBlank(volumeName), "A valid " +
"volume name is needed");
Preconditions.checkNotNull(this.pipelines, "Pipelines cannot be null");
Preconditions.checkState(this.pipelines.size() > 0, "At least one " +
"pipeline location is needed for a volume");
for (int x = 0; x < pipelines.size(); x++) {
Preconditions.checkNotNull(pipelines.get(x).getData(), "cBlock " +
"relies on private data on the pipeline, null data found.");
}
Preconditions.checkNotNull(clientManager, "Client Manager cannot be " +
"null");
Preconditions.checkState(blockSize > 0, "Block size has to be a " +
"number greater than 0");
Preconditions.checkState(volumeSize > 0, "Volume Size cannot be less " +
"than 1");
Preconditions.checkNotNull(this.flusher, "Flusher cannot be null.");
CBlockLocalCache cache = new CBlockLocalCache(this.configuration,
this.volumeName, this.userName, this.pipelines, blockSize,
volumeSize, flusher);
cache.setCblockTargetMetrics(this.metrics);
cache.setClientManager(this.clientManager);
// TODO : Support user configurable maximum size.
long cacheSize = computeCacheSize(this.configuration, this.volumeSize,
this.blockSize);
cache.setCurrentCacheSize(cacheSize);
return cache;
}
}
}
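/*
 * A standalone illustration of the sizing rule in Builder#computeCacheSize
 * above: the cache is capped by the free space under the DB path and is
 * skipped entirely when it would cover less than 10% of the volume. The
 * figures in main() are made up.
 */
public class CacheSizeSketch {
  static long computeCacheSize(long spaceRemaining, long volumeSize) {
    double cacheRatio = 1.0;
    if (spaceRemaining < volumeSize) {
      cacheRatio = (double) spaceRemaining / volumeSize;
    }
    // Worth caching only if at least 10% of the volume fits on disk.
    return cacheRatio >= 0.10 ? (long) (volumeSize * cacheRatio) : 0;
  }

  public static void main(String[] args) {
    long volume = 100L * 1024 * 1024 * 1024; // a 100 GB volume
    System.out.println(computeCacheSize(2 * volume, volume));  // ample disk: full volume size
    System.out.println(computeCacheSize(volume / 2, volume));  // half the space: 50 GB cache
    System.out.println(computeCacheSize(volume / 20, volume)); // only 5% free: cache skipped, 0
  }
}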

View File

@ -1,77 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import java.nio.ByteBuffer;
/**
* Impl class for LogicalBlock.
*/
public class DiskBlock implements LogicalBlock {
private ByteBuffer data;
private long blockID;
private boolean persisted;
/**
* Constructs a DiskBlock Class from the following params.
* @param blockID - 64-bit block ID
* @param data - Byte Array
* @param persisted - Flag which tells us if this is persisted to remote
*/
public DiskBlock(long blockID, byte[] data, boolean persisted) {
if (data != null) {
this.data = ByteBuffer.wrap(data);
}
this.blockID = blockID;
this.persisted = persisted;
}
@Override
public ByteBuffer getData() {
return data;
}
/**
* Clears the byte buffer, since the data is no longer needed.
*/
@Override
public void clearData() {
data.clear();
}
@Override
public long getBlockID() {
return blockID;
}
@Override
public boolean isPersisted() {
return persisted;
}
/**
* Sets the value of persisted.
* @param value - True if this has been persisted to container, false
* otherwise.
*/
public void setPersisted(boolean value) {
persisted = value;
}
}
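/*
 * A quick usage sketch for DiskBlock, assuming it compiles alongside the
 * class above: it wraps a byte[] and tracks whether the block has been
 * persisted to a remote container.
 */
public class DiskBlockSketch {
  public static void main(String[] args) {
    DiskBlock block = new DiskBlock(42L, new byte[]{1, 2, 3}, false);
    System.out.println("id=" + block.getBlockID()
        + " bytes=" + block.getData().remaining()
        + " persisted=" + block.isPersisted());
    block.setPersisted(true); // e.g. after the container write succeeded
    block.clearData();        // the buffer contents are no longer needed
  }
}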

View File

@ -1,263 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;
import com.google.common.primitives.Longs;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
import org.apache.hadoop.utils.LevelDBStore;
import org.iq80.leveldb.DBException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* Reads blocks from the container via the local cache.
*/
public class SyncBlockReader {
private static final Logger LOG =
LoggerFactory.getLogger(SyncBlockReader.class);
/**
* Update Queue - The reason why we have this queue is that we want to
* return the block as soon as we read it from the containers. This is the
* work queue that takes each read block and updates the cache. During
* testing we found that levelDB is slow during writes, hence we want to
* return the block as soon as possible and update levelDB asynchronously.
*/
private static final int QUEUE_SIZE = 1024;
/**
* Config.
*/
private final Configuration conf;
/**
* The parent cache this reader is operating against.
*/
private final CBlockLocalCache parentCache;
private final BlockingQueue<Runnable> updateQueue;
/**
* The executor is used for running LevelDB updates. In the future, we
* might do read-aheads, and this pool is useful for that too. The reason
* why we don't share an executor for reads and writes is that the write
* task is a couple of orders of magnitude slower than the read task, so
* we don't want the DB updates to queue up behind the writes.
*/
private final ThreadPoolExecutor executor;
/**
* Number of threads that pool starts with.
*/
private final int corePoolSize = 1;
/**
* Maximum number of threads our pool will ever create.
*/
private final int maxPoolSize = 10;
/**
* The idle time a thread hangs around waiting for work. If we don't find
* new work within 60 seconds, the worker thread is terminated.
*/
private final long keepAlive = 60L;
/**
* Constructs a SyncBlock reader.
*
* @param conf - Configuration
* @param cache - Cache
*/
public SyncBlockReader(Configuration conf, CBlockLocalCache cache) {
this.conf = conf;
this.parentCache = cache;
updateQueue = new ArrayBlockingQueue<>(QUEUE_SIZE, true);
ThreadFactory workerThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("SyncBlockReader Thread #%d").setDaemon(true).build();
executor = new HadoopThreadPoolExecutor(
corePoolSize, maxPoolSize, keepAlive, TimeUnit.SECONDS,
updateQueue, workerThreadFactory,
new ThreadPoolExecutor.CallerRunsPolicy());
}
/**
* Returns the cache DB.
*
* @return LevelDB
*/
private LevelDBStore getCacheDB() {
return parentCache.getCacheDB();
}
/**
* Returns data from the local cache if found, else reads from the remote
* container.
*
* @param blockID - blockID
* @return LogicalBlock
*/
LogicalBlock readBlock(long blockID) throws IOException {
XceiverClientSpi client = null;
byte[] data = getBlockFromDB(blockID);
if (data != null) {
parentCache.getTargetMetrics().incNumReadCacheHits();
return new DiskBlock(blockID, data, false);
}
parentCache.getTargetMetrics().incNumReadCacheMiss();
try {
client = parentCache.getClientManager()
.acquireClient(parentCache.getPipeline(blockID));
LogicalBlock block = getBlockFromContainer(blockID, client);
return block;
} catch (Exception ex) {
parentCache.getTargetMetrics().incNumFailedReadBlocks();
LOG.error("read failed for BlockId: {}", blockID, ex);
throw ex;
} finally {
if (client != null) {
parentCache.getClientManager().releaseClient(client);
}
}
}
/**
* Gets data from the DB if it exists.
*
* @param blockID - block id
* @return data
*/
private byte[] getBlockFromDB(long blockID) {
try {
if (parentCache.isShortCircuitIOEnabled()) {
long startTime = Time.monotonicNow();
byte[] data = getCacheDB().get(Longs.toByteArray(blockID));
long endTime = Time.monotonicNow();
if (parentCache.isTraceEnabled()) {
parentCache.getTracer().info(
"Task=ReadTaskDBRead,BlockID={},SHA={},Time={}",
blockID, (data != null && data.length > 0)
? DigestUtils.sha256Hex(data) : null,
endTime - startTime);
}
parentCache.getTargetMetrics().updateDBReadLatency(
endTime - startTime);
return data;
}
} catch (DBException dbe) {
LOG.error("Error while reading from cacheDB.", dbe);
throw dbe;
}
return null;
}
/**
* Returns a block from a remote container. If the key is not found on the
* remote container, we just return a block initialized with zeros.
*
* @param blockID - blockID
* @param client - client
* @return LogicalBlock
* @throws IOException
*/
private LogicalBlock getBlockFromContainer(long blockID,
XceiverClientSpi client) throws IOException {
String containerName = parentCache.getPipeline(blockID).getContainerName();
try {
long startTime = Time.monotonicNow();
ContainerProtos.GetSmallFileResponseProto response =
ContainerProtocolCalls.readSmallFile(client, containerName,
Long.toString(blockID), parentCache.getTraceID(blockID));
long endTime = Time.monotonicNow();
if (parentCache.isTraceEnabled()) {
parentCache.getTracer().info(
"Task=ReadTaskContainerRead,BlockID={},SHA={},Time={}",
blockID, response.getData().getData().toByteArray().length > 0 ?
DigestUtils.sha256Hex(response.getData()
.getData().toByteArray()) : null, endTime - startTime);
}
parentCache.getTargetMetrics().updateContainerReadLatency(
endTime - startTime);
DiskBlock block = new DiskBlock(blockID,
response.getData().getData().toByteArray(), false);
if (parentCache.isShortCircuitIOEnabled()) {
queueUpdateTask(block);
}
return block;
} catch (IOException ex) {
if (ex instanceof StorageContainerException) {
parentCache.getTargetMetrics().incNumReadLostBlocks();
StorageContainerException sce = (StorageContainerException) ex;
if (sce.getResult() == ContainerProtos.Result.NO_SUCH_KEY ||
sce.getResult() == ContainerProtos.Result.IO_EXCEPTION) {
return new DiskBlock(blockID, new byte[parentCache.getBlockSize()],
false);
}
}
throw ex;
}
}
/**
* Updates the cache with the block that we just read.
*
* @param block
*/
private void queueUpdateTask(final DiskBlock block) {
Runnable updateTask = () -> {
if (block.getData().array().length > 0) {
getCacheDB().put(Longs.toByteArray(block.getBlockID()),
block.getData().array());
block.setPersisted(true);
} else {
LOG.error("Refusing to update the a null block in the local cache.");
}
};
if (this.executor.isShutdown() || this.executor.isTerminated()) {
LOG.error("Thread executor is not running.");
} else {
this.executor.submit(updateTask);
}
}
/**
* This is a read operation; we don't care if the cache update for the
* last block we read is lost.
*/
void shutdown() {
this.executor.shutdownNow();
}
}
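/*
 * A minimal sketch of the read-through pattern implemented above: try the
 * local cache, fall back to the remote source on a miss, and schedule the
 * cache update asynchronously so the read returns immediately. All names
 * are illustrative stand-ins, not the CBlock API.
 */
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ReadThroughSketch {
  private final ConcurrentMap<Long, byte[]> cache = new ConcurrentHashMap<>();
  private final ExecutorService updater = Executors.newSingleThreadExecutor();

  byte[] read(long blockId) {
    byte[] hit = cache.get(blockId);
    if (hit != null) {
      return hit; // cache hit: no remote round trip
    }
    byte[] remote = fetchRemote(blockId); // cache miss: go to the container
    updater.submit(() -> cache.put(blockId, remote)); // update off the read path
    return remote;
  }

  private byte[] fetchRemote(long blockId) {
    return new byte[]{(byte) blockId}; // stand-in for readSmallFile()
  }

  public static void main(String[] args) {
    ReadThroughSketch reader = new ReadThroughSketch();
    System.out.println(reader.read(1L).length); // miss, then async cache fill
    System.out.println(reader.read(1L).length); // likely a hit on the second read
    reader.updater.shutdown();
  }
}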

View File

@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache.impl;

View File

@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper.cache;

View File

@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.jscsiHelper;

View File

@ -1,331 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.kubernetes;
import com.google.gson.reflect.TypeToken;
import com.squareup.okhttp.RequestBody;
import io.kubernetes.client.ApiClient;
import io.kubernetes.client.ApiException;
import io.kubernetes.client.Configuration;
import io.kubernetes.client.apis.CoreV1Api;
import io.kubernetes.client.models.V1ISCSIVolumeSource;
import io.kubernetes.client.models.V1ObjectMeta;
import io.kubernetes.client.models.V1ObjectReference;
import io.kubernetes.client.models.V1PersistentVolume;
import io.kubernetes.client.models.V1PersistentVolumeClaim;
import io.kubernetes.client.models.V1PersistentVolumeSpec;
import io.kubernetes.client.util.Config;
import io.kubernetes.client.util.Watch;
import okio.Buffer;
import org.apache.hadoop.cblock.CblockUtils;
import org.apache.hadoop.cblock.exception.CBlockException;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_IP;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_PORT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_KUBERNETES_CBLOCK_USER;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY;
/**
* Kubernetes Dynamic Persistent Volume provisioner.
*
* Listens on the kubernetes feed and creates the appropriate cblock AND
* kubernetes PersistentVolume according to the created PersistentVolumeClaims.
*/
public class DynamicProvisioner implements Runnable {
protected static final Logger LOGGER =
LoggerFactory.getLogger(DynamicProvisioner.class);
private static final String STORAGE_CLASS = "cblock";
private static final String PROVISIONER_ID = "hadoop.apache.org/cblock";
private static final String KUBERNETES_PROVISIONER_KEY =
"volume.beta.kubernetes.io/storage-provisioner";
private static final String KUBERNETES_BIND_COMPLETED_KEY =
"pv.kubernetes.io/bind-completed";
private boolean running = true;
private final StorageManager storageManager;
private String kubernetesConfigFile;
private String externalIp;
private int externalPort;
private String cblockUser;
private CoreV1Api api;
private ApiClient client;
private Thread watcherThread;
public DynamicProvisioner(OzoneConfiguration ozoneConf,
StorageManager storageManager) throws IOException {
this.storageManager = storageManager;
kubernetesConfigFile = ozoneConf
.getTrimmed(DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY);
String jscsiServerAddress = ozoneConf
.get(DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY,
DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT);
externalIp = ozoneConf.
getTrimmed(DFS_CBLOCK_ISCSI_ADVERTISED_IP, jscsiServerAddress);
externalPort = ozoneConf.
getInt(DFS_CBLOCK_ISCSI_ADVERTISED_PORT,
DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT);
cblockUser = ozoneConf.getTrimmed(DFS_CBLOCK_KUBERNETES_CBLOCK_USER,
DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT);
}
public void init() throws IOException {
if (kubernetesConfigFile != null) {
client = Config.fromConfig(kubernetesConfigFile);
} else {
client = Config.fromCluster();
}
client.getHttpClient().setReadTimeout(60, TimeUnit.SECONDS);
Configuration.setDefaultApiClient(client);
api = new CoreV1Api();
watcherThread = new Thread(this);
watcherThread.setName("DynamicProvisioner");
watcherThread.setDaemon(true);
}
@Override
public void run() {
LOGGER.info("Starting kubernetes dynamic provisioner.");
while (running) {
String resourceVersion = null;
try {
Watch<V1PersistentVolumeClaim> watch = Watch.createWatch(client,
api.listPersistentVolumeClaimForAllNamespacesCall(null,
null,
false,
null,
null,
null,
resourceVersion,
null,
true,
null,
null),
new TypeToken<Watch.Response<V1PersistentVolumeClaim>>() {
}.getType());
// check the new PVC resources, and create the cblock volume + PV if needed
for (Watch.Response<V1PersistentVolumeClaim> item : watch) {
V1PersistentVolumeClaim claim = item.object;
if (isPvMissingForPvc(claim)) {
LOGGER.info("Provisioning volumes for PVC {}/{}",
claim.getMetadata().getNamespace(),
claim.getMetadata().getName());
if (LOGGER.isDebugEnabled()) {
RequestBody request =
api.getApiClient().serialize(claim, "application/json");
final Buffer buffer = new Buffer();
request.writeTo(buffer);
LOGGER.debug("New PVC is detected: " + buffer.readUtf8());
}
String volumeName = createVolumeName(claim);
long size = CblockUtils.parseSize(
claim.getSpec().getResources().getRequests().get("storage"));
createCBlock(volumeName, size);
createPersistentVolumeFromPVC(item.object, volumeName);
}
}
} catch (Exception ex) {
if (ex.getCause() != null && ex
.getCause() instanceof SocketTimeoutException) {
// This is normal. We are connected to the kubernetes server and the
// connection should be reopened from time to time...
LOGGER.debug("Timeout exception occurred", ex);
} else {
LOGGER.error("Error on provisioning persistent volumes.", ex);
try {
// we can try again in the main loop
Thread.sleep(1000);
} catch (InterruptedException e) {
LOGGER.error("Error on sleeping after an error.", e);
}
}
}
}
}
private boolean isPvMissingForPvc(V1PersistentVolumeClaim claim) {
Map<String, String> annotations = claim.getMetadata().getAnnotations();
return claim.getStatus().getPhase().equals("Pending") && STORAGE_CLASS
.equals(claim.getSpec().getStorageClassName()) && PROVISIONER_ID
.equals(annotations.get(KUBERNETES_PROVISIONER_KEY)) && !"yes"
.equals(annotations.get(KUBERNETES_BIND_COMPLETED_KEY));
}
@VisibleForTesting
protected String createVolumeName(V1PersistentVolumeClaim claim) {
return claim.getMetadata().getName() + "-" + claim.getMetadata()
.getUid();
}
public void stop() {
running = false;
try {
watcherThread.join(60000);
} catch (InterruptedException e) {
LOGGER.error("Kubernetes watcher thread can't stopped gracefully.", e);
}
}
private void createCBlock(String volumeName, long size)
throws CBlockException {
MountVolumeResponse mountVolumeResponse =
storageManager.isVolumeValid(cblockUser, volumeName);
if (!mountVolumeResponse.getIsValid()) {
storageManager
.createVolume(cblockUser, volumeName, size, 4 * 1024);
}
}
private void createPersistentVolumeFromPVC(V1PersistentVolumeClaim claim,
String volumeName) throws ApiException, IOException {
V1PersistentVolume v1PersistentVolume =
persistentVolumeBuilder(claim, volumeName);
if (LOGGER.isDebugEnabled()) {
RequestBody request =
api.getApiClient().serialize(v1PersistentVolume, "application/json");
final Buffer buffer = new Buffer();
request.writeTo(buffer);
LOGGER.debug("Creating new PV: " + buffer.readUtf8());
}
api.createPersistentVolume(v1PersistentVolume, null);
}
protected V1PersistentVolume persistentVolumeBuilder(
V1PersistentVolumeClaim claim,
String volumeName) {
V1PersistentVolume v1PersistentVolume = new V1PersistentVolume();
v1PersistentVolume.setKind("PersistentVolume");
v1PersistentVolume.setApiVersion("v1");
V1ObjectMeta metadata = new V1ObjectMeta();
metadata.setName(volumeName);
metadata.setNamespace(claim.getMetadata().getNamespace());
metadata.setAnnotations(new HashMap<>());
metadata.getAnnotations()
.put("pv.kubernetes.io/provisioned-by", PROVISIONER_ID);
metadata.getAnnotations()
.put("volume.beta.kubernetes.io/storage-class", STORAGE_CLASS);
v1PersistentVolume.setMetadata(metadata);
V1PersistentVolumeSpec spec = new V1PersistentVolumeSpec();
spec.setCapacity(new HashMap<>());
spec.getCapacity().put("storage",
claim.getSpec().getResources().getRequests().get("storage"));
spec.setAccessModes(new ArrayList<>());
spec.getAccessModes().add("ReadWriteOnce");
V1ObjectReference claimRef = new V1ObjectReference();
claimRef.setName(claim.getMetadata().getName());
claimRef.setNamespace(claim.getMetadata().getNamespace());
claimRef.setKind(claim.getKind());
claimRef.setApiVersion(claim.getApiVersion());
claimRef.setUid(claim.getMetadata().getUid());
spec.setClaimRef(claimRef);
spec.persistentVolumeReclaimPolicy("Delete");
V1ISCSIVolumeSource iscsi = new V1ISCSIVolumeSource();
iscsi.setIqn(cblockUser + ":" + volumeName);
iscsi.setLun(0);
iscsi.setFsType("ext4");
String portal = externalIp + ":" + externalPort;
iscsi.setTargetPortal(portal);
iscsi.setPortals(new ArrayList<>());
iscsi.getPortals().add(portal);
spec.iscsi(iscsi);
v1PersistentVolume.setSpec(spec);
return v1PersistentVolume;
}
@VisibleForTesting
protected CoreV1Api getApi() {
return api;
}
public void start() {
watcherThread.start();
}
}
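/*
 * A standalone sketch of the claim filter used in isPvMissingForPvc above:
 * a claim is provisioned only when it is Pending, requests the cblock
 * storage class, is assigned to this provisioner, and has not been bound
 * yet. Plain strings replace the kubernetes client models here.
 */
import java.util.Map;

public class ClaimFilterSketch {
  static boolean shouldProvision(String phase, String storageClass,
      Map<String, String> annotations) {
    return "Pending".equals(phase)
        && "cblock".equals(storageClass)
        && "hadoop.apache.org/cblock".equals(
            annotations.get("volume.beta.kubernetes.io/storage-provisioner"))
        && !"yes".equals(annotations.get("pv.kubernetes.io/bind-completed"));
  }

  public static void main(String[] args) {
    Map<String, String> annotations = Map.of(
        "volume.beta.kubernetes.io/storage-provisioner",
        "hadoop.apache.org/cblock");
    System.out.println(shouldProvision("Pending", "cblock", annotations)); // true
    System.out.println(shouldProvision("Bound", "cblock", annotations));   // false
  }
}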

View File

@ -1,23 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains helper classes to run hadoop cluster in kubernetes
* environment.
*/
package org.apache.hadoop.cblock.kubernetes;

View File

@ -1,107 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.meta;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
/**
*
* The internal representation of a container maintained by CBlock server.
* Include enough information to exactly identify a container for read/write
* operation.
*
* NOTE that this class is a work in progress. It depends on the HDFS-7240
* container implementation and currently exists only to allow testing.
*/
public class ContainerDescriptor {
private final String containerID;
// The index of this container within a volume. On creation, there may
// be no way to know the index of the container, as it is
// volume-specific information.
private int containerIndex;
private Pipeline pipeline;
public ContainerDescriptor(String containerID) {
this.containerID = containerID;
}
public ContainerDescriptor(String containerID, int containerIndex) {
this.containerID = containerID;
this.containerIndex = containerIndex;
}
public void setContainerIndex(int idx) {
this.containerIndex = idx;
}
public String getContainerID() {
return containerID;
}
public void setPipeline(Pipeline pipeline) {
this.pipeline = pipeline;
}
public Pipeline getPipeline() {
return pipeline;
}
public int getContainerIndex() {
return containerIndex;
}
public long getUtilization() {
return 0;
}
public CBlockClientServerProtocolProtos.ContainerIDProto toProtobuf() {
CBlockClientServerProtocolProtos.ContainerIDProto.Builder builder =
CBlockClientServerProtocolProtos.ContainerIDProto.newBuilder();
builder.setContainerID(containerID);
builder.setIndex(containerIndex);
if (pipeline != null) {
builder.setPipeline(pipeline.getProtobufMessage());
}
return builder.build();
}
public static ContainerDescriptor fromProtobuf(byte[] data)
throws InvalidProtocolBufferException {
CBlockClientServerProtocolProtos.ContainerIDProto id =
CBlockClientServerProtocolProtos.ContainerIDProto.parseFrom(data);
return new ContainerDescriptor(id.getContainerID(),
(int)id.getIndex());
}
@Override
public int hashCode() {
return containerID.hashCode() * 37 + containerIndex;
}
@Override
public boolean equals(Object o) {
if (o != null && o instanceof ContainerDescriptor) {
ContainerDescriptor other = (ContainerDescriptor)o;
return containerID.equals(other.containerID) &&
containerIndex == other.containerIndex;
}
return false;
}
}
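/*
 * A usage sketch of the protobuf round trip above, assuming it compiles in
 * the same package as ContainerDescriptor with the generated
 * CBlockClientServerProtocolProtos classes on the classpath.
 */
public class ContainerDescriptorSketch {
  public static void main(String[] args) throws Exception {
    ContainerDescriptor original = new ContainerDescriptor("container-1", 3);
    byte[] wire = original.toProtobuf().toByteArray();   // serialize to bytes
    ContainerDescriptor copy = ContainerDescriptor.fromProtobuf(wire);
    System.out.println(original.equals(copy));           // true: ID and index match
  }
}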

View File

@ -1,269 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.meta;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* The internal representation maintained by CBlock server as the info for
* a volume. Contains the list of containers belonging to this volume.
*
* Many methods of this class are written such that the volume information
* (including the container list) can be easily transformed into a JSON
* string that can be stored in, and parsed from, a persistent store for
* cblock server persistence.
*
* This class is still work-in-progress.
*/
public class VolumeDescriptor {
// The main data structure is the container location map;
// other fields are mainly just bookkeeping information.
// Since only one operation at a time is allowed, there is no
// need to consider concurrency control here.
// The key is the container id.
private static final Logger LOG =
LoggerFactory.getLogger(VolumeDescriptor.class);
private ConcurrentHashMap<String, ContainerDescriptor> containerMap;
private String userName;
private int blockSize;
private long volumeSize;
private String volumeName;
// This is essentially the ordered list of keys of containerMap,
// which is somewhat redundant information. But since we are
// likely to access it frequently based on ordering, we keep
// this copy to avoid having to sort the keys every time.
private List<String> containerIdOrdered;
/**
* This is not called explicitly, but it is necessary because it will be
* invoked implicitly by the parse method when reconstructing the object
* from a JSON string. The get*() and set*() methods exist for the same
* purpose.
*/
public VolumeDescriptor() {
this(null, null, 0, 0);
}
public VolumeDescriptor(String userName, String volumeName, long volumeSize,
int blockSize) {
this.containerMap = new ConcurrentHashMap<>();
this.userName = userName;
this.volumeName = volumeName;
this.blockSize = blockSize;
this.volumeSize = volumeSize;
this.containerIdOrdered = new LinkedList<>();
}
public String getUserName() {
return userName;
}
public void setUserName(String userName) {
this.userName = userName;
}
public String getVolumeName() {
return volumeName;
}
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
public long getVolumeSize() {
return volumeSize;
}
public void setVolumeSize(long volumeSize) {
this.volumeSize = volumeSize;
}
public int getBlockSize() {
return blockSize;
}
public void setBlockSize(int blockSize) {
this.blockSize = blockSize;
}
public void setContainerIDs(ArrayList<String> containerIDs) {
containerIdOrdered.addAll(containerIDs);
}
public void addContainer(ContainerDescriptor containerDescriptor) {
containerMap.put(containerDescriptor.getContainerID(),
containerDescriptor);
}
public HashMap<String, Pipeline> getPipelines() {
HashMap<String, Pipeline> pipelines = new HashMap<>();
for (Map.Entry<String, ContainerDescriptor> entry :
containerMap.entrySet()) {
pipelines.put(entry.getKey(), entry.getValue().getPipeline());
}
return pipelines;
}
public boolean isEmpty() {
VolumeInfo info = getInfo();
return info.getUsage() == 0;
}
public VolumeInfo getInfo() {
// TODO : need to actually go through all containers of this volume and
// ask for their utilization.
long utilization = 0;
for (Map.Entry<String, ContainerDescriptor> entry :
containerMap.entrySet()) {
utilization += entry.getValue().getUtilization();
}
return new VolumeInfo(this.userName, this.volumeName,
this.volumeSize, this.blockSize,
utilization * blockSize);
}
public String[] getContainerIDs() {
return containerIdOrdered.toArray(new String[containerIdOrdered.size()]);
}
public List<String> getContainerIDsList() {
return new ArrayList<>(containerIdOrdered);
}
public List<Pipeline> getContainerPipelines() {
Map<String, Pipeline> tmp = getPipelines();
List<Pipeline> pipelineList = new LinkedList<>();
for (String containerIDString : containerIdOrdered) {
pipelineList.add(tmp.get(containerIDString));
}
return pipelineList;
}
@Override
public String toString() {
String string = "";
string += "Username:" + userName + "\n";
string += "VolumeName:" + volumeName + "\n";
string += "VolumeSize:" + volumeSize + "\n";
string += "blockSize:" + blockSize + "\n";
string += "containerIds:" + containerIdOrdered + "\n";
string += "containerIdsWithObject:" + containerMap.keySet();
return string;
}
public CBlockClientServerProtocolProtos.MountVolumeResponseProto
toProtobuf() {
CBlockClientServerProtocolProtos.MountVolumeResponseProto.Builder volume =
CBlockClientServerProtocolProtos.MountVolumeResponseProto.newBuilder();
volume.setIsValid(true);
volume.setVolumeName(volumeName);
volume.setUserName(userName);
volume.setVolumeSize(volumeSize);
volume.setBlockSize(blockSize);
for (String containerIDString : containerIdOrdered) {
ContainerDescriptor containerDescriptor = containerMap.get(
containerIDString);
volume.addAllContainerIDs(containerDescriptor.toProtobuf());
}
return volume.build();
}
public static VolumeDescriptor fromProtobuf(byte[] data)
throws InvalidProtocolBufferException {
CBlockClientServerProtocolProtos.MountVolumeResponseProto volume =
CBlockClientServerProtocolProtos.MountVolumeResponseProto
.parseFrom(data);
String userName = volume.getUserName();
String volumeName = volume.getVolumeName();
long volumeSize = volume.getVolumeSize();
int blockSize = volume.getBlockSize();
VolumeDescriptor volumeDescriptor = new VolumeDescriptor(userName,
volumeName, volumeSize, blockSize);
List<CBlockClientServerProtocolProtos.ContainerIDProto> containers
= volume.getAllContainerIDsList();
String[] containerOrdering = new String[containers.size()];
for (CBlockClientServerProtocolProtos.ContainerIDProto containerProto :
containers) {
ContainerDescriptor containerDescriptor = new ContainerDescriptor(
containerProto.getContainerID(),
(int)containerProto.getIndex());
if(containerProto.hasPipeline()) {
containerDescriptor.setPipeline(
Pipeline.getFromProtoBuf(containerProto.getPipeline()));
}
volumeDescriptor.addContainer(containerDescriptor);
containerOrdering[containerDescriptor.getContainerIndex()] =
containerDescriptor.getContainerID();
}
volumeDescriptor.setContainerIDs(
new ArrayList<>(Arrays.asList(containerOrdering)));
return volumeDescriptor;
}
@Override
public int hashCode() {
return userName.hashCode()*37 + volumeName.hashCode();
}
@Override
public boolean equals(Object o) {
if (o instanceof VolumeDescriptor) {
VolumeDescriptor other = (VolumeDescriptor)o;
if (!userName.equals(other.getUserName()) ||
!volumeName.equals(other.getVolumeName()) ||
volumeSize != other.getVolumeSize() ||
blockSize != other.getBlockSize()) {
return false;
}
if (containerIdOrdered.size() != other.containerIdOrdered.size() ||
containerMap.size() != other.containerMap.size()) {
return false;
}
for (int i = 0; i<containerIdOrdered.size(); i++) {
if (!containerIdOrdered.get(i).equals(
other.containerIdOrdered.get(i))) {
return false;
}
}
return containerMap.equals(other.containerMap);
}
return false;
}
}
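To make the reconstruction path in fromProtobuf() above concrete, here is a hedged sketch that builds the same state by hand; the user, volume, and container names are made up:
VolumeDescriptor vol = new VolumeDescriptor("alice", "vol1", 1L << 30, 4096);
vol.addContainer(new ContainerDescriptor("container-0", 0)); // fills containerMap
vol.setContainerIDs(new ArrayList<>(Arrays.asList("container-0"))); // keeps ordering
VolumeInfo info = vol.getInfo(); // usage = summed utilization * blockSize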


@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.meta;
/**
* A wrapper class that represents the information about a volume. Used in
* communication between CBlock client and CBlock server only.
*/
public class VolumeInfo {
private final String userName;
private final String volumeName;
private final long volumeSize;
private final long blockSize;
private final long usage;
public VolumeInfo(String userName, String volumeName, long volumeSize,
long blockSize, long usage) {
this.userName = userName;
this.volumeName = volumeName;
this.volumeSize = volumeSize;
this.blockSize = blockSize;
this.usage = usage;
}
// When listing volumes, the usage will not be set.
public VolumeInfo(String userName, String volumeName, long volumeSize,
long blockSize) {
this.userName = userName;
this.volumeName = volumeName;
this.volumeSize = volumeSize;
this.blockSize = blockSize;
this.usage = -1;
}
public long getVolumeSize() {
return volumeSize;
}
public long getBlockSize() {
return blockSize;
}
public long getUsage() {
return usage;
}
public String getUserName() {
return userName;
}
public String getVolumeName() {
return volumeName;
}
@Override
public String toString() {
return " userName:" + userName +
" volumeName:" + volumeName +
" volumeSize:" + volumeSize +
" blockSize:" + blockSize +
" (sizeInBlocks:" + volumeSize/blockSize + ")" +
" usageInBlocks:" + usage;
}
}


@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.meta;


@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;


@ -1,38 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.proto;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import java.io.IOException;
import java.util.List;
/**
* The protocol that CBlock client side uses to talk to server side. CBlock
* client is the point where a volume is mounted. All the actual volume IO
* operations will go through CBlock client after the volume is mounted.
*
* When users mount a volume on CBlock client, CBlock client side uses this
* protocol to send mount request to CBlock server.
*/
public interface CBlockClientProtocol {
MountVolumeResponse mountVolume(String userName, String volumeName)
throws IOException;
List<VolumeInfo> listVolumes() throws IOException;
}
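A hedged sketch of how a mount-time caller would drive this interface; client stands for any CBlockClientProtocol implementation (for example an RPC proxy) and is assumed rather than shown:
MountVolumeResponse resp = client.mountVolume("alice", "vol1");
if (resp.getIsValid()) {
  // These pipelines are what the client uses for direct container IO.
  List<Pipeline> pipelines = resp.getContainerList();
}
for (VolumeInfo volume : client.listVolumes()) {
  System.out.println(volume);
}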


@ -1,45 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.proto;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.IOException;
import java.util.List;
/**
* CBlock uses a separate command line tool to send volume management
* operations to CBlock server, including create/delete/info/list volumes. This
* is the protocol used by the command line tool to send these requests and get
* responses from CBlock server.
*/
@InterfaceAudience.Private
public interface CBlockServiceProtocol {
void createVolume(String userName, String volumeName,
long volumeSize, int blockSize) throws IOException;
void deleteVolume(String userName, String volumeName,
boolean force) throws IOException;
VolumeInfo infoVolume(String userName,
String volumeName) throws IOException;
List<VolumeInfo> listVolume(String userName) throws IOException;
}
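For illustration, the full admin lifecycle this interface supports, as one hedged sketch; service is an assumed CBlockServiceProtocol handle, and the names and sizes are made up:
service.createVolume("alice", "vol1", 1L << 30, 4096);
VolumeInfo info = service.infoVolume("alice", "vol1");
List<VolumeInfo> volumes = service.listVolume("alice");
// force=false: the delete fails unless the volume is empty.
service.deleteVolume("alice", "vol1", false);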


@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.proto;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.util.HashMap;
import java.util.List;
/**
* The response message of mounting a volume. It includes enough information
* for the client to communicate (perform IO) with the volume containers
* directly.
*/
public class MountVolumeResponse {
private final boolean isValid;
private final String userName;
private final String volumeName;
private final long volumeSize;
private final int blockSize;
private List<Pipeline> containerList;
private HashMap<String, Pipeline> pipelineMap;
public MountVolumeResponse(boolean isValid, String userName,
String volumeName, long volumeSize, int blockSize,
List<Pipeline> containerList,
HashMap<String, Pipeline> pipelineMap) {
this.isValid = isValid;
this.userName = userName;
this.volumeName = volumeName;
this.volumeSize = volumeSize;
this.blockSize = blockSize;
this.containerList = containerList;
this.pipelineMap = pipelineMap;
}
public boolean getIsValid() {
return isValid;
}
public String getUserName() {
return userName;
}
public String getVolumeName() {
return volumeName;
}
public long getVolumeSize() {
return volumeSize;
}
public int getBlockSize() {
return blockSize;
}
public List<Pipeline> getContainerList() {
return containerList;
}
public HashMap<String, Pipeline> getPipelineMap() {
return pipelineMap;
}
}


@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.proto;


@ -1,37 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.protocolPB;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtocolInfo;
/**
* This is the protocol CBlock client uses to talk to CBlock server.
* CBlock client is the mounting point of a volume. When a user mounts a
* volume, the cBlock client running on the local node will use this protocol
* to talk to CBlock server to mount the volume.
*/
@ProtocolInfo(protocolName =
"org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface CBlockClientServerProtocolPB extends
CBlockClientServerProtocolProtos
.CBlockClientServerProtocolService.BlockingInterface {
}


@ -1,116 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.protocolPB;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.CBlockClientProtocol;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.stream.Collectors;
/**
* The server-side implementation of the cblock client-to-server protocol.
*/
@InterfaceAudience.Private
public class CBlockClientServerProtocolServerSideTranslatorPB implements
CBlockClientServerProtocolPB {
private final CBlockClientProtocol impl;
public CBlockClientServerProtocolServerSideTranslatorPB(
CBlockClientProtocol impl) {
this.impl = impl;
}
@Override
public CBlockClientServerProtocolProtos.MountVolumeResponseProto mountVolume(
RpcController controller,
CBlockClientServerProtocolProtos.MountVolumeRequestProto request)
throws ServiceException {
String userName = request.getUserName();
String volumeName = request.getVolumeName();
CBlockClientServerProtocolProtos.MountVolumeResponseProto.Builder
resp =
CBlockClientServerProtocolProtos
.MountVolumeResponseProto.newBuilder();
try {
MountVolumeResponse result = impl.mountVolume(userName, volumeName);
boolean isValid = result.getIsValid();
resp.setIsValid(isValid);
if (isValid) {
resp.setUserName(result.getUserName());
resp.setVolumeName(result.getVolumeName());
resp.setVolumeSize(result.getVolumeSize());
resp.setBlockSize(result.getBlockSize());
List<Pipeline> containers = result.getContainerList();
HashMap<String, Pipeline> pipelineMap = result.getPipelineMap();
for (int i=0; i<containers.size(); i++) {
CBlockClientServerProtocolProtos.ContainerIDProto.Builder id =
CBlockClientServerProtocolProtos.ContainerIDProto.newBuilder();
String containerName = containers.get(i).getContainerName();
id.setContainerID(containerName);
id.setIndex(i);
if (pipelineMap.containsKey(containerName)) {
id.setPipeline(pipelineMap.get(containerName).getProtobufMessage());
}
resp.addAllContainerIDs(id.build());
}
}
} catch (IOException e) {
throw new ServiceException(e);
}
return resp.build();
}
@Override
public CBlockClientServerProtocolProtos.ListVolumesResponseProto listVolumes(
RpcController controller,
CBlockClientServerProtocolProtos.ListVolumesRequestProto request)
throws ServiceException {
try {
CBlockClientServerProtocolProtos.ListVolumesResponseProto.Builder resp =
CBlockClientServerProtocolProtos.ListVolumesResponseProto
.newBuilder();
List<VolumeInfo> volumeInfos = impl.listVolumes();
List<CBlockServiceProtocolProtos.VolumeInfoProto> convertedInfos =
volumeInfos.stream().map(
volumeInfo -> CBlockServiceProtocolProtos.VolumeInfoProto
.newBuilder().setUserName(volumeInfo.getUserName())
.setBlockSize(volumeInfo.getBlockSize())
.setVolumeName(volumeInfo.getVolumeName())
.setVolumeSize(volumeInfo.getVolumeSize())
.setUsage(volumeInfo.getUsage()).build())
.collect(Collectors.toList());
resp.addAllVolumeEntry(convertedInfos);
return resp.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
}
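Since the .proto files set java_generic_services = true, the generated service class exposes newReflectiveBlockingService(). A sketch of how a server could wrap this translator into a protobuf BlockingService; cblockManager is an assumed CBlockClientProtocol implementation:
com.google.protobuf.BlockingService service =
    CBlockClientServerProtocolProtos.CBlockClientServerProtocolService
        .newReflectiveBlockingService(
            new CBlockClientServerProtocolServerSideTranslatorPB(cblockManager));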


@ -1,35 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.protocolPB;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtocolInfo;
/**
* Users use an independent command line tool to talk to the CBlock server
* for volume operations (create/delete/info/list). This is the protocol
* used by the command line tool to send these requests to the CBlock
* server.
*/
@ProtocolInfo(protocolName =
"org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface CBlockServiceProtocolPB extends
CBlockServiceProtocolProtos.CBlockServiceProtocolService.BlockingInterface {
}
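A sketch of the usual Hadoop RPC registration for a @ProtocolInfo-annotated PB interface like this one; conf is an assumed Configuration instance:
RPC.setProtocolEngine(conf, CBlockServiceProtocolPB.class,
    ProtobufRpcEngine.class);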


@ -1,159 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.protocolPB;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.CBlockServiceProtocol;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_SERVICE_BLOCK_SIZE_DEFAULT;
/**
* Server side implementation of the protobuf service.
*/
@InterfaceAudience.Private
public class CBlockServiceProtocolServerSideTranslatorPB
implements CBlockServiceProtocolPB {
private final CBlockServiceProtocol impl;
private static final Logger LOG =
LoggerFactory.getLogger(
CBlockServiceProtocolServerSideTranslatorPB.class);
@Override
public CBlockServiceProtocolProtos.CreateVolumeResponseProto createVolume(
RpcController controller,
CBlockServiceProtocolProtos.CreateVolumeRequestProto request)
throws ServiceException {
if (LOG.isDebugEnabled()) {
LOG.debug("createVolume called! volume size: " + request.getVolumeSize()
+ " block size: " + request.getBlockSize());
}
try {
if (request.hasBlockSize()) {
impl.createVolume(request.getUserName(), request.getVolumeName(),
request.getVolumeSize(), request.getBlockSize());
} else {
impl.createVolume(request.getUserName(), request.getVolumeName(),
request.getVolumeSize(), DFS_CBLOCK_SERVICE_BLOCK_SIZE_DEFAULT);
}
} catch (IOException e) {
throw new ServiceException(e);
}
return CBlockServiceProtocolProtos.CreateVolumeResponseProto
.newBuilder().build();
}
@Override
public CBlockServiceProtocolProtos.DeleteVolumeResponseProto deleteVolume(
RpcController controller,
CBlockServiceProtocolProtos.DeleteVolumeRequestProto request)
throws ServiceException {
if (LOG.isDebugEnabled()) {
LOG.debug("deleteVolume called! volume name: " + request.getVolumeName()
+ " force:" + request.getForce());
}
try {
if (request.hasForce()) {
impl.deleteVolume(request.getUserName(), request.getVolumeName(),
request.getForce());
} else {
impl.deleteVolume(request.getUserName(), request.getVolumeName(),
false);
}
} catch (IOException e) {
throw new ServiceException(e);
}
return CBlockServiceProtocolProtos.DeleteVolumeResponseProto
.newBuilder().build();
}
@Override
public CBlockServiceProtocolProtos.InfoVolumeResponseProto infoVolume(
RpcController controller,
CBlockServiceProtocolProtos.InfoVolumeRequestProto request
) throws ServiceException {
if (LOG.isDebugEnabled()) {
LOG.debug("infoVolume called! volume name: " + request.getVolumeName());
}
CBlockServiceProtocolProtos.InfoVolumeResponseProto.Builder resp =
CBlockServiceProtocolProtos.InfoVolumeResponseProto.newBuilder();
CBlockServiceProtocolProtos.VolumeInfoProto.Builder volumeInfoProto =
CBlockServiceProtocolProtos.VolumeInfoProto.newBuilder();
VolumeInfo volumeInfo;
try {
volumeInfo = impl.infoVolume(request.getUserName(),
request.getVolumeName());
} catch (IOException e) {
throw new ServiceException(e);
}
volumeInfoProto.setVolumeSize(volumeInfo.getVolumeSize());
volumeInfoProto.setBlockSize(volumeInfo.getBlockSize());
volumeInfoProto.setUsage(volumeInfo.getUsage());
volumeInfoProto.setUserName(volumeInfo.getUserName());
volumeInfoProto.setVolumeName(volumeInfo.getVolumeName());
resp.setVolumeInfo(volumeInfoProto);
return resp.build();
}
@Override
public CBlockServiceProtocolProtos.ListVolumeResponseProto listVolume(
RpcController controller,
CBlockServiceProtocolProtos.ListVolumeRequestProto request
) throws ServiceException {
CBlockServiceProtocolProtos.ListVolumeResponseProto.Builder resp =
CBlockServiceProtocolProtos.ListVolumeResponseProto.newBuilder();
String userName = null;
if (request.hasUserName()) {
userName = request.getUserName();
}
if (LOG.isDebugEnabled()) {
LOG.debug("list volume received for :" + userName);
}
List<VolumeInfo> volumes;
try {
volumes = impl.listVolume(userName);
} catch (IOException e) {
throw new ServiceException(e);
}
for (VolumeInfo volume : volumes) {
CBlockServiceProtocolProtos.VolumeInfoProto.Builder volumeEntryProto
= CBlockServiceProtocolProtos.VolumeInfoProto.newBuilder();
volumeEntryProto.setUserName(volume.getUserName());
volumeEntryProto.setVolumeName(volume.getVolumeName());
volumeEntryProto.setVolumeSize(volume.getVolumeSize());
volumeEntryProto.setBlockSize(volume.getBlockSize());
resp.addVolumeEntry(volumeEntryProto.build());
}
return resp.build();
}
public CBlockServiceProtocolServerSideTranslatorPB(
CBlockServiceProtocol impl) {
this.impl = impl;
}
}


@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.protocolPB;


@ -1,427 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.storage;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.cblock.CBlockConfigKeys;
import org.apache.hadoop.cblock.exception.CBlockException;
import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
/**
* This class maintains the key space of CBlock, more specifically, the
* volume to container mapping. The core data structure
* is a map from users to their volume info, where the volume info is a
* handle to a volume, containing the information for IO on that volume
* and a storage client responsible for talking to the SCM.
*/
public class StorageManager {
private static final Logger LOGGER =
LoggerFactory.getLogger(StorageManager.class);
private final ScmClient storageClient;
private final int numThreads;
private static final int MAX_THREADS =
Runtime.getRuntime().availableProcessors() * 2;
private static final int MAX_QUEUE_CAPACITY = 1024;
private final String cblockId;
/**
* We will NOT have the situation where the same kv pair is being
* processed twice at the same time, but it is possible to have multiple
* kv pairs being processed at the same time.
*
* So using just a ConcurrentHashMap should be sufficient.
*
* Again, since the same user accessing from multiple places is currently
* not allowed, there is no need to consider concurrency of the volume map
* within one user.
*/
private ConcurrentHashMap<String, HashMap<String, VolumeDescriptor>>
user2VolumeMap;
// size of an underlying container.
// TODO : assuming all containers are of the same size
private long containerSizeB;
public StorageManager(ScmClient storageClient,
OzoneConfiguration ozoneConfig, String cblockId) throws IOException {
this.storageClient = storageClient;
this.user2VolumeMap = new ConcurrentHashMap<>();
this.containerSizeB = storageClient.getContainerSize(null);
this.numThreads =
ozoneConfig.getInt(CBlockConfigKeys.DFS_CBLOCK_MANAGER_POOL_SIZE,
CBlockConfigKeys.DFS_CBLOCK_MANAGER_POOL_SIZE_DEFAULT);
this.cblockId = cblockId;
}
/**
* This call will put the volume into the in-memory map.
*
* More specifically, it makes the volume discoverable on the jSCSI server
* and keeps its reference in memory for lookup.
* @param userName the user name of the volume.
* @param volumeName the name of the volume.
* @param volume a {@link VolumeDescriptor} object encapsulating the
* information about the volume.
*/
private void makeVolumeReady(String userName, String volumeName,
VolumeDescriptor volume) {
HashMap<String, VolumeDescriptor> userVolumes;
if (user2VolumeMap.containsKey(userName)) {
userVolumes = user2VolumeMap.get(userName);
} else {
userVolumes = new HashMap<>();
user2VolumeMap.put(userName, userVolumes);
}
userVolumes.put(volumeName, volume);
}
/**
* Called by CBlockManager to add volumes read from the persistent store
* into memory; this needs to contact SCM to set up references to the
* containers given their ids.
*
* Only used for the failover process, where container meta info is read
* from the persistent store while the containers themselves are alive.
*
* TODO : Currently, this method is not being called as the failover
* process is not implemented yet.
*
* @param volumeDescriptor a {@link VolumeDescriptor} object encapsulating
* the information about a volume.
* @throws IOException when adding the volume failed, e.g. the volume
* already exists, or no more containers are available.
*/
public synchronized void addVolume(VolumeDescriptor volumeDescriptor)
throws IOException {
String userName = volumeDescriptor.getUserName();
String volumeName = volumeDescriptor.getVolumeName();
LOGGER.info("addVolume:" + userName + ":" + volumeName);
if (user2VolumeMap.containsKey(userName)
&& user2VolumeMap.get(userName).containsKey(volumeName)) {
throw new CBlockException("Volume already exist for "
+ userName + ":" + volumeName);
}
// the container ids are read from levelDB, setting up the
// container handlers here.
String[] containerIds = volumeDescriptor.getContainerIDs();
for (String containerId : containerIds) {
try {
Pipeline pipeline = storageClient.getContainer(containerId);
ContainerDescriptor containerDescriptor =
new ContainerDescriptor(containerId);
containerDescriptor.setPipeline(pipeline);
volumeDescriptor.addContainer(containerDescriptor);
} catch (IOException e) {
LOGGER.error("Getting container failed! Container:{} error:{}",
containerId, e);
throw e;
}
}
// now ready to put into in-memory map.
makeVolumeReady(userName, volumeName, volumeDescriptor);
}
private class CreateContainerTask implements Runnable {
private final VolumeDescriptor volume;
private final int containerIdx;
private final ArrayList<String> containerIds;
private final AtomicInteger numFailed;
CreateContainerTask(VolumeDescriptor volume, int containerIdx,
ArrayList<String> containerIds,
AtomicInteger numFailed) {
this.volume = volume;
this.containerIdx = containerIdx;
this.containerIds = containerIds;
this.numFailed = numFailed;
}
/**
* Creates one container for this volume via the storage client, records
* its id at the assigned index in containerIds, and increments numFailed
* on any error so the caller can detect partial failure.
*/
public void run() {
ContainerDescriptor container = null;
try {
Pipeline pipeline = storageClient.createContainer(
HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE,
KeyUtil.getContainerName(volume.getUserName(),
volume.getVolumeName(), containerIdx), cblockId);
container = new ContainerDescriptor(pipeline.getContainerName());
container.setPipeline(pipeline);
container.setContainerIndex(containerIdx);
volume.addContainer(container);
containerIds.set(containerIdx, container.getContainerID());
} catch (Exception e) {
numFailed.incrementAndGet();
if (container != null) {
LOGGER.error("Error creating container Container:{}:" +
" index:{} error:{}", container.getContainerID(),
containerIdx, e);
} else {
LOGGER.error("Error creating container.", e);
}
}
}
}
private boolean createVolumeContainers(VolumeDescriptor volume) {
ArrayList<String> containerIds = new ArrayList<>();
ThreadPoolExecutor executor = new ThreadPoolExecutor(
Math.min(numThreads, MAX_THREADS),
MAX_THREADS, 1, TimeUnit.SECONDS,
new ArrayBlockingQueue<>(MAX_QUEUE_CAPACITY),
new ThreadPoolExecutor.CallerRunsPolicy());
AtomicInteger numFailedCreates = new AtomicInteger(0);
long allocatedSize = 0;
int containerIdx = 0;
while (allocatedSize < volume.getVolumeSize()) {
// adding null to allocate space in ArrayList
containerIds.add(containerIdx, null);
Runnable task = new CreateContainerTask(volume, containerIdx,
containerIds, numFailedCreates);
executor.submit(task);
allocatedSize += containerSizeB;
containerIdx += 1;
}
// issue the command and then wait for it to finish
executor.shutdown();
try {
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
} catch (InterruptedException e) {
LOGGER.error("Error creating volume:{} error:{}",
volume.getVolumeName(), e);
executor.shutdownNow();
Thread.currentThread().interrupt();
}
volume.setContainerIDs(containerIds);
return numFailedCreates.get() == 0;
}
private void deleteContainer(String containerID, boolean force) {
try {
Pipeline pipeline = storageClient.getContainer(containerID);
storageClient.deleteContainer(pipeline, force);
} catch (Exception e) {
LOGGER.error("Error deleting container Container:{} error:{}",
containerID, e);
}
}
private void deleteVolumeContainers(List<String> containers, boolean force)
throws CBlockException {
ThreadPoolExecutor executor = new ThreadPoolExecutor(
Math.min(numThreads, MAX_THREADS),
MAX_THREADS, 1, TimeUnit.SECONDS,
new ArrayBlockingQueue<>(MAX_QUEUE_CAPACITY),
new ThreadPoolExecutor.CallerRunsPolicy());
for (String deleteContainer : containers) {
if (deleteContainer != null) {
Runnable task = () -> deleteContainer(deleteContainer, force);
executor.submit(task);
}
}
// issue the command and then wait for it to finish
executor.shutdown();
try {
executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
} catch (InterruptedException e) {
LOGGER.error("Error deleting containers error:{}", e);
executor.shutdownNow();
Thread.currentThread().interrupt();
}
}
/**
* Called by CBlock server when creating a fresh volume. The core
* logic adds the needed information to the in-memory metadata.
*
* @param userName the user name of the volume.
* @param volumeName the name of the volume.
* @param volumeSize the size of the volume.
* @param blockSize the block size of the volume.
* @throws CBlockException when the volume can not be created.
*/
public synchronized void createVolume(String userName, String volumeName,
long volumeSize, int blockSize) throws CBlockException {
LOGGER.debug("createVolume:" + userName + ":" + volumeName);
if (user2VolumeMap.containsKey(userName)
&& user2VolumeMap.get(userName).containsKey(volumeName)) {
throw new CBlockException("Volume already exist for "
+ userName + ":" + volumeName);
}
if (volumeSize < blockSize) {
throw new CBlockException("Volume size smaller than block size? " +
"volume size:" + volumeSize + " block size:" + blockSize);
}
VolumeDescriptor volume
= new VolumeDescriptor(userName, volumeName, volumeSize, blockSize);
boolean success = createVolumeContainers(volume);
if (!success) {
// cleanup the containers and throw the exception
deleteVolumeContainers(volume.getContainerIDsList(), true);
throw new CBlockException("Error when creating volume:" + volumeName);
}
makeVolumeReady(userName, volumeName, volume);
}
/**
* Called by CBlock server to delete a specific volume. Mainly
* to check whether it can be deleted, and to remove it from the in-memory
* metadata.
*
* @param userName the user name of the volume.
* @param volumeName the name of the volume.
* @param force if set to false, only delete the volume if it is empty;
*              otherwise throw an exception. If set to true, delete it
*              regardless.
* @throws CBlockException when the volume can not be deleted.
*/
public synchronized void deleteVolume(String userName, String volumeName,
boolean force) throws CBlockException {
if (!user2VolumeMap.containsKey(userName)
|| !user2VolumeMap.get(userName).containsKey(volumeName)) {
throw new CBlockException("Deleting non-exist volume "
+ userName + ":" + volumeName);
}
if (!force && !user2VolumeMap.get(userName).get(volumeName).isEmpty()) {
throw new CBlockException("Deleting a non-empty volume without force!");
}
VolumeDescriptor volume = user2VolumeMap.get(userName).remove(volumeName);
deleteVolumeContainers(volume.getContainerIDsList(), force);
if (user2VolumeMap.get(userName).size() == 0) {
user2VolumeMap.remove(userName);
}
}
/**
* Called by CBlock server to get information of a specific volume.
*
* @param userName the user name of the volume.
* @param volumeName the name of the volume.
* @return a {@link VolumeInfo} object encapsulating the information of the
* volume.
* @throws CBlockException when the information can not be retrieved.
*/
public synchronized VolumeInfo infoVolume(String userName, String volumeName)
throws CBlockException {
if (!user2VolumeMap.containsKey(userName)
|| !user2VolumeMap.get(userName).containsKey(volumeName)) {
throw new CBlockException("Getting info for non-exist volume "
+ userName + ":" + volumeName);
}
return user2VolumeMap.get(userName).get(volumeName).getInfo();
}
/**
* Called by CBlock server to check whether the given volume can be
* mounted, i.e. whether it can be found in the meta data.
*
* Returns a {@link MountVolumeResponse} with an isValid flag to indicate
* whether the volume can be mounted or not.
*
* @param userName the user name of the volume.
* @param volumeName the name of the volume
* @return a {@link MountVolumeResponse} object encapsulating whether the
* volume is valid, and if yes, the required information for the client to
* read/write the volume.
*/
public synchronized MountVolumeResponse isVolumeValid(
String userName, String volumeName) {
if (!user2VolumeMap.containsKey(userName)
|| !user2VolumeMap.get(userName).containsKey(volumeName)) {
// in the case of invalid volume, no need to set any value other than
// isValid flag.
return new MountVolumeResponse(false, null, null, 0, 0, null, null);
}
VolumeDescriptor volume = user2VolumeMap.get(userName).get(volumeName);
return new MountVolumeResponse(true, userName,
volumeName, volume.getVolumeSize(), volume.getBlockSize(),
volume.getContainerPipelines(), volume.getPipelines());
}
/**
* Called by CBlock manager to list all volumes.
*
* @param userName the userName whose volumes are to be listed; if set to
* null, all volumes will be listed.
* @return a list of {@link VolumeDescriptor} representing all volumes
* requested.
*/
public synchronized List<VolumeDescriptor> getAllVolume(String userName) {
ArrayList<VolumeDescriptor> allVolumes = new ArrayList<>();
if (userName == null) {
for (Map.Entry<String, HashMap<String, VolumeDescriptor>> entry
: user2VolumeMap.entrySet()) {
allVolumes.addAll(entry.getValue().values());
}
} else {
if (user2VolumeMap.containsKey(userName)) {
allVolumes.addAll(user2VolumeMap.get(userName).values());
}
}
return allVolumes;
}
/**
* Only for testing the behavior of create/delete volumes.
*/
@VisibleForTesting
public VolumeDescriptor getVolume(String userName, String volumeName) {
if (!user2VolumeMap.containsKey(userName)
|| !user2VolumeMap.get(userName).containsKey(volumeName)) {
return null;
}
return user2VolumeMap.get(userName).get(volumeName);
}
}
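Putting the pieces together, a hedged end-to-end sketch of the volume path through StorageManager; scmClient (an ScmClient) and conf (an OzoneConfiguration) are assumed, and the names and sizes are illustrative:
StorageManager manager = new StorageManager(scmClient, conf, "cblock-1");
// Allocates containers in parallel via CreateContainerTask, then makes
// the volume discoverable in the in-memory map.
manager.createVolume("alice", "vol1", 1L << 30, 4096);
MountVolumeResponse mount = manager.isVolumeValid("alice", "vol1");
// force=true deletes the backing containers even if the volume is non-empty.
manager.deleteVolume("alice", "vol1", true);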


@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.storage;


@ -1,49 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.util;
/**
* A simple class that generates key mappings, e.g. from a
* (userName, volumeName) pair to a single volumeKey string.
*/
public final class KeyUtil {
public static String getVolumeKey(String userName, String volumeName) {
return userName + ":" + volumeName;
}
public static String getContainerName(String userName, String volumeName,
int containerID) {
return getVolumeKey(userName, volumeName) + "#" + containerID;
}
public static String getUserNameFromVolumeKey(String key) {
return key.split(":")[0];
}
public static String getVolumeFromVolumeKey(String key) {
return key.split(":")[1];
}
public static boolean isValidVolumeKey(String key) {
return key.contains(":");
}
private KeyUtil() {
}
}
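Concretely, the key formats these helpers produce:
String volumeKey = KeyUtil.getVolumeKey("alice", "vol1");         // "alice:vol1"
String container = KeyUtil.getContainerName("alice", "vol1", 3);  // "alice:vol1#3"
assert "alice".equals(KeyUtil.getUserNameFromVolumeKey(volumeKey));
assert "vol1".equals(KeyUtil.getVolumeFromVolumeKey(volumeKey));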


@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.util;


@ -1,93 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These .proto interfaces are private and unstable.
* Please see http://wiki.apache.org/hadoop/Compatibility
* for what changes are allowed for an *unstable* .proto interface.
*/
option java_package = "org.apache.hadoop.cblock.protocol.proto";
option java_outer_classname = "CBlockClientServerProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.cblock;
import "hdds.proto";
import "CBlockServiceProtocol.proto";
/**
* This message is sent from CBlock client side to CBlock server to
* mount a volume specified by owner name and volume name.
*
* Right now, this is the only communication between client and server.
* After the volume is mounted, the CBlock client will talk to the
* containers by itself, without involving the CBlock server.
*/
message MountVolumeRequestProto {
required string userName = 1;
required string volumeName = 2;
}
/**
* This message is sent from the CBlock server to the CBlock client as the
* response to a mount volume request. It indicates whether the volume is
* valid to access at all (e.g. whether the volume exists).
*
* It also includes enough information (volume size, block size, list of
* containers for this volume) for the client side to perform reads and
* writes on the volume.
*/
message MountVolumeResponseProto {
required bool isValid = 1;
optional string userName = 2;
optional string volumeName = 3;
optional uint64 volumeSize = 4;
optional uint32 blockSize = 5;
repeated ContainerIDProto allContainerIDs = 6;
}
/**
* This message includes the ID of a container, which can be used to locate
* the container. Since the order of containers needs to be maintained, it
* also includes an index field to verify the correctness of the order.
*/
message ContainerIDProto {
required string containerID = 1;
required uint64 index = 2;
// making pipeline optional to be compatible with existing tests
optional hadoop.hdds.Pipeline pipeline = 3;
}
message ListVolumesRequestProto {
}
message ListVolumesResponseProto {
repeated VolumeInfoProto volumeEntry = 1;
}
service CBlockClientServerProtocolService {
/**
* mount the volume.
*/
rpc mountVolume(MountVolumeRequestProto) returns (MountVolumeResponseProto);
rpc listVolumes(ListVolumesRequestProto) returns(ListVolumesResponseProto);
}
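On the Java side this service compiles into a BlockingInterface. A hedged sketch of a client call against an assumed rpcProxy implementing that interface; passing a null RpcController is the common Hadoop convention, and the call throws ServiceException:
CBlockClientServerProtocolProtos.MountVolumeRequestProto request =
    CBlockClientServerProtocolProtos.MountVolumeRequestProto.newBuilder()
        .setUserName("alice")
        .setVolumeName("vol1")
        .build();
CBlockClientServerProtocolProtos.MountVolumeResponseProto response =
    rpcProxy.mountVolume(null, request);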


@ -1,133 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These .proto interfaces are private and unstable.
* Please see http://wiki.apache.org/hadoop/Compatibility
* for what changes are allowed for an *unstable* .proto interface.
*/
option java_package = "org.apache.hadoop.cblock.protocol.proto";
option java_outer_classname = "CBlockServiceProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.cblock;
/**
* This message is sent to the CBlock server to create a volume. Creating a
* volume requires four parameters: the owner of the volume, the name of
* the volume, the size of the volume, and the block size of the volume.
*/
message CreateVolumeRequestProto {
required string userName = 1;
required string volumeName = 2;
required uint64 volumeSize = 3;
optional uint32 blockSize = 4 [default = 4096];
}
/**
* Empty response message.
*/
message CreateVolumeResponseProto {
}
/**
* This message is sent to CBlock server to delete a volume. The volume
* is specified by owner name and volume name. If force is set to
* false, the volume will be deleted only if it is empty; otherwise it is
* deleted regardless.
*/
message DeleteVolumeRequestProto {
required string userName = 1;
required string volumeName = 2;
optional bool force = 3;
}
/**
* Empty response message.
*/
message DeleteVolumeResponseProto {
}
/**
* This message is sent to CBlock server to request info of a volume. The
* volume is specified by owner name and volume name.
*/
message InfoVolumeRequestProto {
required string userName = 1;
required string volumeName = 2;
}
/**
* This message describes the information of a volume.
* Currently, the info includes the volume creation parameters and a number
* as the usage of the volume, in terms of number of bytes.
*/
message VolumeInfoProto {
required string userName = 1;
required string volumeName = 2;
required uint64 volumeSize = 3;
required uint64 blockSize = 4;
optional uint64 usage = 5;
// TODO : potentially volume ACL
}
/**
* This message is sent from CBlock server as response of info volume request.
*/
message InfoVolumeResponseProto {
optional VolumeInfoProto volumeInfo = 1;
}
/**
* This message is sent to CBlock server to list all available volumes.
*/
message ListVolumeRequestProto {
optional string userName = 1;
}
/**
* This message is sent from CBlock server as response of volume listing.
*/
message ListVolumeResponseProto {
repeated VolumeInfoProto volumeEntry = 1;
}
service CBlockServiceProtocolService {
/**
* Create a volume.
*/
rpc createVolume(CreateVolumeRequestProto) returns(CreateVolumeResponseProto);
/**
* Delete a volume.
*/
rpc deleteVolume(DeleteVolumeRequestProto) returns(DeleteVolumeResponseProto);
/**
* Get info of a volume.
*/
rpc infoVolume(InfoVolumeRequestProto) returns(InfoVolumeResponseProto);
/**
* List all available volumes.
*/
rpc listVolume(ListVolumeRequestProto) returns(ListVolumeResponseProto);
}


@ -1,347 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into ozone-site.xml and change them -->
<!-- there. If ozone-site.xml does not already exist, create it. -->
<!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE, -->
<!--DEBUG, CLIENT, SERVER, KSM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
<!--REST, STORAGE, PIPELINE, STANDALONE -->
<configuration>
<!--CBlock Settings-->
<property>
<name>dfs.cblock.block.buffer.flush.interval</name>
<value>60s</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Controls the frequency at which the local cache flushes blocks
to the remote containers.
</description>
</property>
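For reference, a hedged sketch of how a duration-typed key like this is typically read in code; the key string is copied from above and the variable names are illustrative:
OzoneConfiguration conf = new OzoneConfiguration();
long flushIntervalSeconds = conf.getTimeDuration(
    "dfs.cblock.block.buffer.flush.interval", 60, TimeUnit.SECONDS);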
<property>
<name>dfs.cblock.cache.block.buffer.size</name>
<value>512</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Size of the local cache, in blocks. The cache size in bytes will be
the block size multiplied by this number.
</description>
</property>
<property>
<name>dfs.cblock.cache.core.min.pool.size</name>
<value>16</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
The minimum number of threads in the pool that the cBlock cache will
use for background I/O to remote containers.
</description>
</property>
<property>
<name>dfs.cblock.cache.max.pool.size</name>
<value>256</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Maximum number of threads in the pool that cBlock cache will
use for background I/O to remote containers.
</description>
</property>
<property>
<name>dfs.cblock.cache.keep.alive</name>
<value>60s</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
If the cblock cache has no I/O, then the threads in the cache
pool are kept idle for this amount of time before shutting down.
</description>
</property>
<property>
<name>dfs.cblock.cache.leveldb.cache.size.mb</name>
<value>256</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
The amount of physical memory allocated to the local cache. The
SCSI driver will allocate this much RAM for its cache instances.
</description>
</property>
<property>
<name>dfs.cblock.cache.max.retry</name>
<value>65536</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
If the local cache is enabled, CBlock writes to the local
cache when I/O happens, and the background I/O threads later write
these blocks to the remote containers. This value controls how many
times a background thread should attempt I/O against the remote
containers before giving up.
</description>
</property>
<property>
<name>dfs.cblock.cache.queue.size.in.kb</name>
<value>256</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Size, in KB, of the in-memory cache queue that is flushed to
local disk.
</description>
</property>
<property>
<name>dfs.cblock.cache.thread.priority</name>
<value>5</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Priority of the cache flusher thread, which affects the relative
performance of writes and reads. Supported values are 1, 5 and 10.
Use 10 for high priority and 1 for low priority.
</description>
</property>
<property>
<name>dfs.cblock.container.size.gb</name>
<value>5</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The size of an ozone container, in GB. Note that this does not
set the container size for ozone itself; it instructs CBlock to
manage containers at a standard size.
</description>
</property>
<property>
<name>dfs.cblock.disk.cache.path</name>
<value>${hadoop.tmp.dir}/cblockCacheDB</value>
<tag>CBLOCK, REQUIRED</tag>
<description>
The default path for the cblock local cache. If the cblock
local cache is enabled, this must be set to a valid path. The cache
*should* be mapped to the fastest disk on a given machine; for example,
an SSD drive would be a good choice. Currently, all mounted disks on a
data node are mapped to a single path, so having a large number of IOPS
is essential.
</description>
</property>
<property>
<name>dfs.cblock.jscsi-address</name>
<value/>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The address that cblock will bind to, in host:port
format. This setting is required for the cblock server to start.
This address is used by jscsi to mount volumes.
</description>
</property>
<property>
<name>dfs.cblock.jscsi.cblock.server.address</name>
<value>127.0.0.1</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The address the local jscsi server will use to talk to the cblock manager.
</description>
</property>
<property>
<name>dfs.cblock.jscsi.port</name>
<value>9811</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The port on the CBlockManager node for jSCSI to talk to.
</description>
</property>
<property>
<name>dfs.cblock.jscsi.rpc-bind-host</name>
<value>0.0.0.0</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The actual address the cblock jscsi rpc server will bind to. If
this optional address is set, it overrides only the hostname portion of
dfs.cblock.jscsi-address.
</description>
</property>
<property>
<name>dfs.cblock.jscsi.server.address</name>
<value>0.0.0.0</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The address that the jscsi server runs on. It is useful to have one
local jscsi server for each client (Linux jSCSI client) that tries to
mount cblock.
</description>
</property>
<property>
<name>dfs.cblock.manager.pool.size</name>
<value>16</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Number of active threads that the cblock manager will use for container
operations. The maximum number of threads is limited to twice the
processor count.
</description>
</property>
<property>
<name>dfs.cblock.rpc.timeout</name>
<value>300s</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
RPC timeout used for cblock CLI operations. When you
create very large disks, such as 5 TB, the number of containers
allocated in the system is huge: 5 TB / 5 GB gives 1000
containers. The client CLI might time out even though the cblock
manager eventually creates the specified disk. This value allows the
user to wait for a longer period.
</description>
</property>
<property>
<name>dfs.cblock.scm.ipaddress</name>
<value>127.0.0.1</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
IP address used by cblock to connect to SCM.
</description>
</property>
<property>
<name>dfs.cblock.scm.port</name>
<value>9860</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
Port used by cblock to connect to SCM.
</description>
</property>
<property>
<name>dfs.cblock.service.handler.count</name>
<value>10</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
Default number of handlers for CBlock service rpc.
</description>
</property>
<property>
<name>dfs.cblock.service.leveldb.path</name>
<value>${hadoop.tmp.dir}/cblock_server.dat</value>
<tag>CBLOCK, REQUIRED</tag>
<description>
Default path for the cblock metadata store.
</description>
</property>
<property>
<name>dfs.cblock.service.rpc-bind-host</name>
<value>0.0.0.0</value>
<tag>CBLOCK, MANAGEMENT</tag>
<description>
The actual address the cblock service RPC server will bind to.
If the optional address is set, it overrides only the hostname portion of
dfs.cblock.servicerpc-address.
</description>
</property>
<property>
<name>dfs.cblock.servicerpc-address</name>
<value/>
<tag>CBLOCK, MANAGEMENT, REQUIRED</tag>
<description>
The address that cblock will bind to, in host:port
format. This setting is required for the cblock server to start.
This address is used for cblock management operations such as create,
delete, info and list volumes.
</description>
</property>
<property>
<name>dfs.cblock.short.circuit.io</name>
<value>false</value>
<tag>CBLOCK, PERFORMANCE</tag>
<description>
Enables use of the local cache in cblock. When this is enabled,
I/O happens against the local cache while background threads do the
actual I/O against the containers.
</description>
</property>
<property>
<name>dfs.cblock.trace.io</name>
<value>false</value>
<tag>CBLOCK, DEBUG</tag>
<description>Default flag for enabling trace I/O. Trace I/O logs all
I/O with hashes of the data, which is useful for detecting problems
such as data corruption.
</description>
</property>
<property>
<name>dfs.cblock.iscsi.advertised.ip</name>
<value>0.0.0.0</value>
<tag>CBLOCK</tag>
<description>
IP address returned during the iscsi discovery.
</description>
</property>
<property>
<name>dfs.cblock.iscsi.advertised.port</name>
<value>3260</value>
<tag>CBLOCK</tag>
<description>
TCP port returned during the iscsi discovery.
</description>
</property>
<property>
<name>dfs.cblock.kubernetes.dynamic-provisioner.enabled</name>
<value>false</value>
<tag>CBLOCK, KUBERNETES</tag>
<description>Flag to enable automatic creation of cblocks and
kubernetes PersistentVolumes in a kubernetes environment.
</description>
</property>
<property>
<name>dfs.cblock.kubernetes.cblock-user</name>
<value>iqn.2001-04.org.apache.hadoop</value>
<tag>CBLOCK, KUBERNETES</tag>
<description>CBlock user to use for the dynamic provisioner.
This user will own all of the auto-created cblocks.
</description>
</property>
<property>
<name>dfs.cblock.kubernetes.configfile</name>
<value></value>
<tag>CBLOCK, KUBERNETES</tag>
<description>Location of the kubernetes configuration file
used to access the kubernetes cluster. Not required inside a pod,
as the default service account will be used if this value is
empty.
</description>
</property>
<property>
<name>dfs.cblock.iscsi.advertised.ip</name>
<value></value>
<tag>CBLOCK, KUBERNETES</tag>
<description>IP where the cblock target server is available
from the kubernetes nodes. Usually this is a cluster IP address
defined by a deployed Service.
</description>
</property>
<property>
<name>dfs.cblock.iscsi.advertised.port</name>
<value>3260</value>
<tag>CBLOCK, KUBERNETES</tag>
<description>Port where the cblock target server is available
from the kubernetes nodes. Could be different from the
listening port if jscsi is behind a Service.
</description>
</property>
</configuration>
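For reference, the tests in this change override these defaults programmatically rather than through ozone-site.xml. A minimal sketch, using only the configuration keys and setters that appear in the tests below; the cache path here is a hypothetical scratch directory:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_DISK_CACHE_PATH_KEY;
    import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
    import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_TRACE_IO;
    import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;

    OzoneConfiguration conf = new OzoneConfiguration();
    // Point the local cache at a scratch directory (dfs.cblock.disk.cache.path);
    // "/tmp/cblockCacheDB" is a hypothetical path.
    conf.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, "/tmp/cblockCacheDB");
    // Enable short-circuit I/O and trace I/O
    // (dfs.cblock.short.circuit.io, dfs.cblock.trace.io).
    conf.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
    conf.setBoolean(DFS_CBLOCK_TRACE_IO, true);
    // Flush the block buffer every 5 seconds
    // (dfs.cblock.block.buffer.flush.interval).
    conf.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 5, TimeUnit.SECONDS);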

View File

@ -1,456 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import com.google.common.primitives.Longs;
import static java.util.concurrent.TimeUnit.SECONDS;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_DISK_CACHE_PATH_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_TRACE_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
/**
* Tests for Local Cache Buffer Manager.
*/
public class TestBufferManager {
private final static long GB = 1024 * 1024 * 1024;
private final static int KB = 1024;
private static MiniOzoneCluster cluster;
private static OzoneConfiguration config;
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
private static XceiverClientManager xceiverClientManager;
@BeforeClass
public static void init() throws IOException {
config = new OzoneConfiguration();
String path = GenericTestUtils.getTempPath(
TestBufferManager.class.getSimpleName());
config.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
config.setBoolean(DFS_CBLOCK_TRACE_IO, true);
config.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
cluster = new MiniOzoneClassicCluster.Builder(config)
.numDataNodes(1).setHandlerType("distributed").build();
storageContainerLocationClient = cluster
.createStorageContainerLocationClient();
xceiverClientManager = new XceiverClientManager(config);
}
@AfterClass
public static void shutdown() throws InterruptedException {
if (cluster != null) {
cluster.shutdown();
}
IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster);
}
/**
* createContainerAndGetPipeline creates a set of containers and returns the
* Pipelines that define those containers.
*
* @param count - Number of containers to create.
* @return - List of Pipelines.
* @throws IOException
*/
private List<Pipeline> createContainerAndGetPipeline(int count)
throws IOException {
List<Pipeline> containerPipelines = new LinkedList<>();
for (int x = 0; x < count; x++) {
String traceID = "trace" + RandomStringUtils.randomNumeric(4);
String containerName = "container" + RandomStringUtils.randomNumeric(10);
Pipeline pipeline =
storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerName, "CBLOCK");
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
ContainerProtocolCalls.createContainer(client, traceID);
// This step is needed since we set private data on pipelines, when we
// read the list from CBlockServer. So we mimic that action here.
pipeline.setData(Longs.toByteArray(x));
containerPipelines.add(pipeline);
xceiverClientManager.releaseClient(client);
}
return containerPipelines;
}
/**
* This test writes some blocks to the cache and then shuts down the cache.
* The cache is then restarted to check that the
* correct number of blocks are read from the dirty log.
*
* @throws IOException
*/
@Test
public void testEmptyBlockBufferHandling() throws IOException,
InterruptedException, TimeoutException {
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestBufferManager.class.getSimpleName()
+ RandomStringUtils.randomNumeric(4));
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
List<Pipeline> pipelines = createContainerAndGetPipeline(10);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(pipelines)
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
// Write data to the cache
cache.put(1, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(0, metrics.getNumDirectBlockWrites());
Assert.assertEquals(1, metrics.getNumWriteOps());
cache.put(2, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(0, metrics.getNumDirectBlockWrites());
Assert.assertEquals(2, metrics.getNumWriteOps());
// Store the previous block buffer position
Assert.assertEquals(2, metrics.getNumBlockBufferUpdates());
// Simulate a shutdown by closing the cache
cache.close();
Thread.sleep(1000);
Assert.assertEquals(1, metrics.getNumBlockBufferFlushTriggered());
Assert.assertEquals(1, metrics.getNumBlockBufferFlushCompleted());
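// Each cached write logs one 8-byte long (the block ID) to the dirty
// log, so the two writes above account for 2 * 8 = 16 bytes.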
Assert.assertEquals(2 * (Long.SIZE/ Byte.SIZE),
metrics.getNumBytesDirtyLogWritten());
Assert.assertEquals(0, metrics.getNumFailedBlockBufferFlushes());
Assert.assertEquals(0, metrics.getNumInterruptedBufferWaits());
// Restart cache and check that right number of entries are read
CBlockTargetMetrics newMetrics = CBlockTargetMetrics.create();
ContainerCacheFlusher newFlusher =
new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, newMetrics);
CBlockLocalCache newCache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(pipelines)
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(newFlusher)
.setCBlockTargetMetrics(newMetrics)
.build();
newCache.start();
Thread flushListenerThread = new Thread(newFlusher);
flushListenerThread.setDaemon(true);
flushListenerThread.start();
Thread.sleep(5000);
Assert.assertEquals(metrics.getNumBlockBufferUpdates(),
newMetrics.getNumDirtyLogBlockRead());
Assert.assertEquals(newMetrics.getNumDirtyLogBlockRead()
* (Long.SIZE/ Byte.SIZE), newMetrics.getNumBytesDirtyLogReads());
// Now shutdown again, nothing should be flushed
newFlusher.shutdown();
Assert.assertEquals(0, newMetrics.getNumBlockBufferUpdates());
Assert.assertEquals(0, newMetrics.getNumBytesDirtyLogWritten());
}
@Test
public void testPeriodicFlush() throws IOException,
InterruptedException, TimeoutException{
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestBufferManager.class.getSimpleName()
+ RandomStringUtils.randomNumeric(4));
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig
.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 5, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(createContainerAndGetPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
Thread.sleep(8000);
// Ticks will be at 5s, 10s and so on, so this count should be 1
Assert.assertEquals(1, metrics.getNumBlockBufferFlushTriggered());
// Nothing pushed to cache, so nothing should be written
Assert.assertEquals(0, metrics.getNumBytesDirtyLogWritten());
Assert.assertEquals(0, metrics.getNumBlockBufferFlushCompleted());
cache.close();
// After close, another trigger should happen but still no data written
Assert.assertEquals(2, metrics.getNumBlockBufferFlushTriggered());
Assert.assertEquals(0, metrics.getNumBytesDirtyLogWritten());
Assert.assertEquals(0, metrics.getNumBlockBufferFlushCompleted());
Assert.assertEquals(0, metrics.getNumFailedBlockBufferFlushes());
}
@Test
public void testSingleBufferFlush() throws IOException,
InterruptedException, TimeoutException {
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestBufferManager.class.getSimpleName()
+ RandomStringUtils.randomNumeric(4));
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(createContainerAndGetPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
for (int i = 0; i < 511; i++) {
cache.put(i, data.getBytes(StandardCharsets.UTF_8));
}
// After writing 511 blocks, no flush should happen
Assert.assertEquals(0, metrics.getNumBlockBufferFlushTriggered());
Assert.assertEquals(0, metrics.getNumBlockBufferFlushCompleted());
// After one more block, a flush should be triggered
cache.put(512, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(1, metrics.getNumBlockBufferFlushTriggered());
Thread.sleep(1000);
Assert.assertEquals(1, metrics.getNumBlockBufferFlushCompleted());
cache.close();
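// 512 writes, each logging one 8-byte block ID, add up to 4 KB in the
// dirty log.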
Assert.assertEquals(512 * (Long.SIZE / Byte.SIZE),
metrics.getNumBytesDirtyLogWritten());
}
@Test
public void testMultipleBuffersFlush() throws IOException,
InterruptedException, TimeoutException {
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestBufferManager.class.getSimpleName()
+ RandomStringUtils.randomNumeric(4));
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig
.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 120, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(createContainerAndGetPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 512; j++) {
cache.put(i * 512 + j, data.getBytes(StandardCharsets.UTF_8));
}
// A flush should be triggered after every 512 block writes
Assert.assertEquals(i + 1, metrics.getNumBlockBufferFlushTriggered());
}
Assert.assertEquals(0, metrics.getNumIllegalDirtyLogFiles());
Assert.assertEquals(0, metrics.getNumFailedDirtyLogFileDeletes());
cache.close();
Assert.assertEquals(4 * 512 * (Long.SIZE / Byte.SIZE),
metrics.getNumBytesDirtyLogWritten());
Assert.assertEquals(5, metrics.getNumBlockBufferFlushTriggered());
Assert.assertEquals(4, metrics.getNumBlockBufferFlushCompleted());
}
@Test
public void testSingleBlockFlush() throws IOException,
InterruptedException, TimeoutException{
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestBufferManager.class.getSimpleName()
+ RandomStringUtils.randomNumeric(4));
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig
.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,
5, SECONDS);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(createContainerAndGetPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
cache.put(0, data.getBytes(StandardCharsets.UTF_8));
Thread.sleep(8000);
// Ticks will be at 5s, 10s and so on, so this count should be 1
Assert.assertEquals(1, metrics.getNumBlockBufferFlushTriggered());
// 1 block written to cache, which should be flushed
Assert.assertEquals(8, metrics.getNumBytesDirtyLogWritten());
Assert.assertEquals(1, metrics.getNumBlockBufferFlushCompleted());
cache.close();
// After close, another trigger should happen but no data should be written
Assert.assertEquals(2, metrics.getNumBlockBufferFlushTriggered());
Assert.assertEquals(8, metrics.getNumBytesDirtyLogWritten());
Assert.assertEquals(1, metrics.getNumBlockBufferFlushCompleted());
Assert.assertEquals(0, metrics.getNumFailedBlockBufferFlushes());
}
@Test
public void testRepeatedBlockWrites() throws IOException,
InterruptedException, TimeoutException{
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestBufferManager.class.getSimpleName()
+ RandomStringUtils.randomNumeric(4));
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(createContainerAndGetPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
Thread flushListenerThread = new Thread(flusher);
flushListenerThread.setDaemon(true);
flushListenerThread.start();
cache.start();
for (int i = 0; i < 512; i++) {
cache.put(i, data.getBytes(StandardCharsets.UTF_8));
}
Assert.assertEquals(512, metrics.getNumWriteOps());
Assert.assertEquals(512, metrics.getNumBlockBufferUpdates());
Assert.assertEquals(1, metrics.getNumBlockBufferFlushTriggered());
Thread.sleep(5000);
Assert.assertEquals(1, metrics.getNumBlockBufferFlushCompleted());
for (int i = 0; i < 512; i++) {
cache.put(i, data.getBytes(StandardCharsets.UTF_8));
}
Assert.assertEquals(1024, metrics.getNumWriteOps());
Assert.assertEquals(1024, metrics.getNumBlockBufferUpdates());
Assert.assertEquals(2, metrics.getNumBlockBufferFlushTriggered());
Thread.sleep(5000);
Assert.assertEquals(0, metrics.getNumWriteIOExceptionRetryBlocks());
Assert.assertEquals(0, metrics.getNumWriteGenericExceptionRetryBlocks());
Assert.assertEquals(2, metrics.getNumBlockBufferFlushCompleted());
}
}

View File

@ -1,35 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import org.apache.hadoop.conf.TestConfigurationFieldsBase;
/**
* Tests that the configuration constants are documented in cblock-default.xml.
*/
public class TestCBlockConfigurationFields extends TestConfigurationFieldsBase {
@Override
public void initializeMemberVariables() {
xmlFilename = "cblock-default.xml";
configurationClasses =
new Class[] {CBlockConfigKeys.class};
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = true;
}
}

View File

@ -1,377 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import com.google.common.primitives.Longs;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_DISK_CACHE_PATH_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_TRACE_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE;
/**
* Tests for Cblock read write functionality.
*/
public class TestCBlockReadWrite {
private final static long GB = 1024 * 1024 * 1024;
private final static int KB = 1024;
private static MiniOzoneCluster cluster;
private static OzoneConfiguration config;
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
private static XceiverClientManager xceiverClientManager;
@BeforeClass
public static void init() throws IOException {
config = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestCBlockReadWrite.class.getSimpleName());
config.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
config.setBoolean(DFS_CBLOCK_TRACE_IO, true);
config.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
cluster = new MiniOzoneClassicCluster.Builder(config)
.numDataNodes(1)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
storageContainerLocationClient = cluster
.createStorageContainerLocationClient();
xceiverClientManager = new XceiverClientManager(config);
}
@AfterClass
public static void shutdown() throws InterruptedException {
if (cluster != null) {
cluster.shutdown();
}
IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster);
}
/**
* getContainerPipeline creates a set of containers and returns the
* Pipelines that define those containers.
*
* @param count - Number of containers to create.
* @return - List of Pipelines.
* @throws IOException throws Exception
*/
private List<Pipeline> getContainerPipeline(int count) throws IOException {
List<Pipeline> containerPipelines = new LinkedList<>();
for (int x = 0; x < count; x++) {
String traceID = "trace" + RandomStringUtils.randomNumeric(4);
String containerName = "container" + RandomStringUtils.randomNumeric(10);
Pipeline pipeline =
storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerName, "CBLOCK");
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
ContainerProtocolCalls.createContainer(client, traceID);
// This step is needed since we set private data on pipelines, when we
// read the list from CBlockServer. So we mimic that action here.
pipeline.setData(Longs.toByteArray(x));
containerPipelines.add(pipeline);
xceiverClientManager.releaseClient(client);
}
return containerPipelines;
}
/**
* This test creates a cache and performs a simple write / read.
* The operations bypass the local cache (direct I/O).
*
* @throws IOException
*/
@Test
public void testDirectIO() throws IOException,
InterruptedException, TimeoutException {
OzoneConfiguration cConfig = new OzoneConfiguration();
cConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, false);
cConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
final long blockID = 0;
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
String dataHash = DigestUtils.sha256Hex(data);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(cConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(cConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
Assert.assertFalse(cache.isShortCircuitIOEnabled());
cache.put(blockID, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(1, metrics.getNumDirectBlockWrites());
Assert.assertEquals(1, metrics.getNumWriteOps());
// Please note that this read is directly from remote container
LogicalBlock block = cache.get(blockID);
Assert.assertEquals(1, metrics.getNumReadOps());
Assert.assertEquals(0, metrics.getNumReadCacheHits());
Assert.assertEquals(1, metrics.getNumReadCacheMiss());
Assert.assertEquals(0, metrics.getNumReadLostBlocks());
Assert.assertEquals(0, metrics.getNumFailedDirectBlockWrites());
cache.put(blockID + 1, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(2, metrics.getNumDirectBlockWrites());
Assert.assertEquals(2, metrics.getNumWriteOps());
Assert.assertEquals(0, metrics.getNumFailedDirectBlockWrites());
// Please note that this read is directly from remote container
block = cache.get(blockID + 1);
Assert.assertEquals(2, metrics.getNumReadOps());
Assert.assertEquals(0, metrics.getNumReadCacheHits());
Assert.assertEquals(2, metrics.getNumReadCacheMiss());
Assert.assertEquals(0, metrics.getNumReadLostBlocks());
String readHash = DigestUtils.sha256Hex(block.getData().array());
Assert.assertEquals("File content does not match.", dataHash, readHash);
GenericTestUtils.waitFor(() -> !cache.isDirtyCache(), 100, 20 * 1000);
cache.close();
}
/**
* This test writes some blocks to the cache and then shuts down the cache.
* The cache is then restarted with "short.circuit.io" disabled to check
* that the blocks are read correctly from the container.
*
* @throws IOException
*/
@Test
public void testContainerWrites() throws IOException,
InterruptedException, TimeoutException {
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestCBlockReadWrite.class.getSimpleName());
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 3,
TimeUnit.SECONDS);
XceiverClientManager xcm = new XceiverClientManager(flushTestConfig);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
int numUniqueBlocks = 4;
String[] data = new String[numUniqueBlocks];
String[] dataHash = new String[numUniqueBlocks];
for (int i = 0; i < numUniqueBlocks; i++) {
data[i] = RandomStringUtils.random(4 * KB);
dataHash[i] = DigestUtils.sha256Hex(data[i]);
}
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xcm, metrics);
List<Pipeline> pipelines = getContainerPipeline(10);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(pipelines)
.setClientManager(xcm)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
Thread flushListenerThread = new Thread(flusher);
flushListenerThread.setDaemon(true);
flushListenerThread.start();
Assert.assertTrue(cache.isShortCircuitIOEnabled());
// Write data to the cache
for (int i = 0; i < 512; i++) {
cache.put(i, data[i % numUniqueBlocks].getBytes(StandardCharsets.UTF_8));
}
// Close the cache and flush the data to the containers
cache.close();
Assert.assertEquals(0, metrics.getNumDirectBlockWrites());
Assert.assertEquals(512, metrics.getNumWriteOps());
Thread.sleep(3000);
flusher.shutdown();
Assert.assertTrue(metrics.getNumBlockBufferFlushTriggered() > 1);
Assert.assertEquals(1, metrics.getNumBlockBufferFlushCompleted());
Assert.assertEquals(0, metrics.getNumWriteIOExceptionRetryBlocks());
Assert.assertEquals(0, metrics.getNumWriteGenericExceptionRetryBlocks());
Assert.assertEquals(0, metrics.getNumFailedReleaseLevelDB());
// Now disable DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO and restart cache
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, false);
CBlockTargetMetrics newMetrics = CBlockTargetMetrics.create();
ContainerCacheFlusher newFlusher =
new ContainerCacheFlusher(flushTestConfig, xcm, newMetrics);
CBlockLocalCache newCache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(pipelines)
.setClientManager(xcm)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(newFlusher)
.setCBlockTargetMetrics(newMetrics)
.build();
newCache.start();
Assert.assertFalse(newCache.isShortCircuitIOEnabled());
// These reads will be from the container; also match the hashes
for (int i = 0; i < 512; i++) {
LogicalBlock block = newCache.get(i);
String readHash = DigestUtils.sha256Hex(block.getData().array());
Assert.assertEquals("File content does not match, for index:"
+ i, dataHash[i % numUniqueBlocks], readHash);
}
Assert.assertEquals(0, newMetrics.getNumReadLostBlocks());
Assert.assertEquals(0, newMetrics.getNumFailedReadBlocks());
newCache.close();
newFlusher.shutdown();
}
@Test
public void testRetryLog() throws IOException,
InterruptedException, TimeoutException {
// Create a new config so that this test writes its metafile to a new location
OzoneConfiguration flushTestConfig = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestCBlockReadWrite.class.getSimpleName());
flushTestConfig.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
flushTestConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
flushTestConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
flushTestConfig.setTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL, 3,
TimeUnit.SECONDS);
int numblocks = 10;
flushTestConfig.setInt(DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE, numblocks);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
List<Pipeline> fakeContainerPipelines = new LinkedList<>();
PipelineChannel pipelineChannel = new PipelineChannel("fake",
LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
"fake");
Pipeline fakePipeline = new Pipeline("fake", pipelineChannel);
fakePipeline.setData(Longs.toByteArray(1));
fakeContainerPipelines.add(fakePipeline);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(fakeContainerPipelines)
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
Thread flushListenerThread = new Thread(flusher);
flushListenerThread.setDaemon(true);
flushListenerThread.start();
for (int i = 0; i < numblocks; i++) {
cache.put(i, data.getBytes(StandardCharsets.UTF_8));
}
Assert.assertEquals(numblocks, metrics.getNumWriteOps());
Thread.sleep(3000);
// all the writes to the container will fail because of fake pipelines
Assert.assertEquals(numblocks, metrics.getNumDirtyLogBlockRead());
Assert.assertTrue(
metrics.getNumWriteGenericExceptionRetryBlocks() >= numblocks);
Assert.assertEquals(0, metrics.getNumWriteIOExceptionRetryBlocks());
Assert.assertEquals(0, metrics.getNumFailedRetryLogFileWrites());
Assert.assertEquals(0, metrics.getNumFailedReleaseLevelDB());
cache.close();
flusher.shutdown();
// restart cache with correct pipelines, now blocks should be uploaded
// correctly
CBlockTargetMetrics newMetrics = CBlockTargetMetrics.create();
ContainerCacheFlusher newFlusher =
new ContainerCacheFlusher(flushTestConfig,
xceiverClientManager, newMetrics);
CBlockLocalCache newCache = CBlockLocalCache.newBuilder()
.setConfiguration(flushTestConfig)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(newFlusher)
.setCBlockTargetMetrics(newMetrics)
.build();
newCache.start();
Thread newFlushListenerThread = new Thread(newFlusher);
newFlushListenerThread.setDaemon(true);
newFlushListenerThread.start();
Thread.sleep(3000);
Assert.assertTrue(newMetrics.getNumRetryLogBlockRead() >= numblocks);
Assert.assertEquals(0, newMetrics.getNumWriteGenericExceptionRetryBlocks());
Assert.assertEquals(0, newMetrics.getNumWriteIOExceptionRetryBlocks());
Assert.assertEquals(0, newMetrics.getNumFailedReleaseLevelDB());
}
}

View File

@ -1,212 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.cblock.util.MockStorageClient;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* This class tests the basics of the CBlock server, mainly the four
* operations on volumes: create, delete, info and list.
*/
public class TestCBlockServer {
private static CBlockManager cBlockManager;
private static OzoneConfiguration conf;
@Before
public void setup() throws Exception {
ScmClient storageClient = new MockStorageClient();
conf = new OzoneConfiguration();
conf.set(DFS_CBLOCK_SERVICERPC_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, "127.0.0.1:0");
cBlockManager = new CBlockManager(conf, storageClient);
cBlockManager.start();
}
@After
public void clean() {
cBlockManager.stop();
cBlockManager.join();
cBlockManager.clean();
}
/**
* Test create volume for different users.
* @throws Exception
*/
@Test
public void testCreateVolume() throws Exception {
String userName1 = "user" + RandomStringUtils.randomNumeric(5);
String userName2 = "user" + RandomStringUtils.randomNumeric(5);
String volumeName1 = "volume" + RandomStringUtils.randomNumeric(5);
String volumeName2 = "volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 1L*1024*1024;
int blockSize = 4096;
cBlockManager.createVolume(userName1, volumeName1, volumeSize, blockSize);
List<VolumeInfo> volumes = cBlockManager.listVolume(userName1);
assertEquals(1, volumes.size());
VolumeInfo existingVolume = volumes.get(0);
assertEquals(userName1, existingVolume.getUserName());
assertEquals(volumeName1, existingVolume.getVolumeName());
assertEquals(volumeSize, existingVolume.getVolumeSize());
assertEquals(blockSize, existingVolume.getBlockSize());
cBlockManager.createVolume(userName1, volumeName2, volumeSize, blockSize);
cBlockManager.createVolume(userName2, volumeName1, volumeSize, blockSize);
volumes = cBlockManager.listVolume(userName1);
assertEquals(2, volumes.size());
volumes = cBlockManager.listVolume(userName2);
assertEquals(1, volumes.size());
}
/**
* Test delete volume.
* @throws Exception
*/
@Test
public void testDeleteVolume() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String volumeName1 = "volume" + RandomStringUtils.randomNumeric(5);
String volumeName2 = "volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 1L*1024*1024;
int blockSize = 4096;
cBlockManager.createVolume(userName, volumeName1, volumeSize, blockSize);
cBlockManager.createVolume(userName, volumeName2, volumeSize, blockSize);
cBlockManager.deleteVolume(userName, volumeName1, true);
List<VolumeInfo> volumes = cBlockManager.listVolume(userName);
assertEquals(1, volumes.size());
VolumeInfo existingVolume = volumes.get(0);
assertEquals(userName, existingVolume.getUserName());
assertEquals(volumeName2, existingVolume.getVolumeName());
assertEquals(volumeSize, existingVolume.getVolumeSize());
assertEquals(blockSize, existingVolume.getBlockSize());
}
/**
* Test info volume.
*
* TODO : usage field is not being tested (as it is not implemented yet)
* @throws Exception
*/
@Test
public void testInfoVolume() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 1L*1024*1024;
int blockSize = 4096;
cBlockManager.createVolume(userName, volumeName, volumeSize, blockSize);
VolumeInfo info = cBlockManager.infoVolume(userName, volumeName);
assertEquals(userName, info.getUserName());
assertEquals(volumeName, info.getVolumeName());
assertEquals(volumeSize, info.getVolumeSize());
assertEquals(blockSize, info.getBlockSize());
}
/**
* Test listing a number of volumes.
* @throws Exception
*/
@Test
public void testListVolume() throws Exception {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String volumeName ="volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 1L*1024*1024;
int blockSize = 4096;
int volumeNum = 100;
for (int i = 0; i<volumeNum; i++) {
cBlockManager.createVolume(userName, volumeName + i,
volumeSize, blockSize);
}
List<VolumeInfo> volumes = cBlockManager.listVolume(userName);
assertEquals(volumeNum, volumes.size());
Set<String> volumeIds = new HashSet<>();
for (int i = 0; i<volumeNum; i++) {
VolumeInfo volumeInfo = volumes.get(i);
assertEquals(userName, volumeInfo.getUserName());
assertFalse(volumeIds.contains(volumeName + i));
volumeIds.add(volumeName + i);
assertEquals(volumeSize, volumeInfo.getVolumeSize());
assertEquals(blockSize, volumeInfo.getBlockSize());
}
for (int i = 0; i<volumeNum; i++) {
assertTrue(volumeIds.contains(volumeName + i));
}
}
/**
* Test listing volumes across multiple users.
* @throws Exception
*/
@Test
public void testListVolumes() throws Exception {
String volumeName ="volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 1L*1024*1024;
int blockSize = 4096;
int volumeNum = 100;
int userCount = 10;
assertTrue("We need at least one volume for each user",
userCount < volumeNum);
for (int i = 0; i<volumeNum; i++) {
String userName =
"user-" + (i % userCount);
cBlockManager.createVolume(userName, volumeName + i,
volumeSize, blockSize);
}
List<VolumeInfo> allVolumes = cBlockManager.listVolumes();
// Check that we have the volumes from all the users.
Set<String> volumeIds = new HashSet<>();
Set<String> usernames = new HashSet<>();
for (int i = 0; i < allVolumes.size(); i++) {
VolumeInfo volumeInfo = allVolumes.get(i);
assertFalse(volumeIds.contains(volumeName + i));
usernames.add(volumeInfo.getUserName());
volumeIds.add(volumeName + i);
assertEquals(volumeSize, volumeInfo.getVolumeSize());
assertEquals(blockSize, volumeInfo.getBlockSize());
}
assertEquals(volumeNum, volumeIds.size());
for (int i = 0; i<volumeNum; i++) {
assertTrue(volumeIds.contains(volumeName + i));
}
assertEquals(userCount, usernames.size());
}
}

View File

@ -1,132 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.cblock.util.MockStorageClient;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY;
import static org.junit.Assert.assertEquals;
/**
* Test that the CBlock server state is maintained in persistent storage
* and can be recovered on CBlock server restart.
*/
public class TestCBlockServerPersistence {
/**
* Test that when the cblock server fails, the volume metadata can be
* restored correctly.
* @throws Exception
*/
@Test
public void testWriteToPersistentStore() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
String userName = "testWriteToPersistentStore";
String volumeName1 = "testVolume1";
String volumeName2 = "testVolume2";
long volumeSize1 = 30L*1024*1024*1024;
long volumeSize2 = 15L*1024*1024*1024;
int blockSize = 4096;
CBlockManager cBlockManager = null;
CBlockManager cBlockManager1 = null;
String path = GenericTestUtils
.getTempPath(TestCBlockServerPersistence.class.getSimpleName());
File filePath = new File(path);
if(!filePath.exists() && !filePath.mkdirs()) {
throw new IOException("Unable to create test DB dir");
}
conf.set(DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY, path.concat(
"/testCblockPersistence.dat"));
try {
ScmClient storageClient = new MockStorageClient();
conf.set(DFS_CBLOCK_SERVICERPC_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, "127.0.0.1:0");
cBlockManager = new CBlockManager(conf, storageClient);
cBlockManager.start();
cBlockManager.createVolume(userName, volumeName1, volumeSize1, blockSize);
cBlockManager.createVolume(userName, volumeName2, volumeSize2, blockSize);
List<VolumeDescriptor> allVolumes = cBlockManager.getAllVolumes();
// close the cblock server. Since meta data is written to disk on volume
// creation, closing server here is the same as a cblock server crash.
cBlockManager.close();
cBlockManager.stop();
cBlockManager.join();
cBlockManager = null;
assertEquals(2, allVolumes.size());
VolumeDescriptor volumeDescriptor1 = allVolumes.get(0);
VolumeDescriptor volumeDescriptor2 = allVolumes.get(1);
// create a new cblock server instance. This is just the
// same as restarting cblock server.
ScmClient storageClient1 = new MockStorageClient();
OzoneConfiguration conf1 = new OzoneConfiguration();
conf1.set(DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY, path.concat(
"/testCblockPersistence.dat"));
conf1.set(DFS_CBLOCK_SERVICERPC_ADDRESS_KEY, "127.0.0.1:0");
conf1.set(DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, "127.0.0.1:0");
cBlockManager1 = new CBlockManager(conf1, storageClient1);
cBlockManager1.start();
List<VolumeDescriptor> allVolumes1 = cBlockManager1.getAllVolumes();
assertEquals(2, allVolumes1.size());
VolumeDescriptor newvolumeDescriptor1 = allVolumes1.get(0);
VolumeDescriptor newvolumeDescriptor2 = allVolumes1.get(1);
// LevelDB iterators seem to return keys in insertion order, in which
// case the else clause should never happen. The second clause is kept
// in case leveldb returns a different key ordering; we do not rely on
// the ordering of keys here.
if (volumeDescriptor1.getVolumeName().equals(
newvolumeDescriptor1.getVolumeName())) {
assertEquals(volumeDescriptor1.toString(),
newvolumeDescriptor1.toString());
assertEquals(volumeDescriptor2.toString(),
newvolumeDescriptor2.toString());
} else {
assertEquals(volumeDescriptor1.toString(),
newvolumeDescriptor2.toString());
assertEquals(volumeDescriptor2.toString(),
newvolumeDescriptor1.toString());
}
} finally {
if (cBlockManager != null) {
cBlockManager.clean();
}
if (cBlockManager1 != null) {
cBlockManager1.close();
cBlockManager1.stop();
cBlockManager1.join();
cBlockManager1.clean();
}
}
}
}

View File

@ -1,444 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import com.google.common.primitives.Longs;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.jscsiHelper.CBlockIStorageImpl;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static java.lang.Math.abs;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_DISK_CACHE_PATH_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO;
import static org.apache.hadoop.cblock.CBlockConfigKeys.
DFS_CBLOCK_TRACE_IO;
/**
* Tests for local cache.
*/
public class TestLocalBlockCache {
private static final Logger LOG =
LoggerFactory.getLogger(TestLocalBlockCache.class);
private static final long GB = 1024 * 1024 * 1024;
private static final int KB = 1024;
private static MiniOzoneCluster cluster;
private static OzoneConfiguration config;
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
private static XceiverClientManager xceiverClientManager;
@BeforeClass
public static void init() throws IOException {
config = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestLocalBlockCache.class.getSimpleName());
config.set(DFS_CBLOCK_DISK_CACHE_PATH_KEY, path);
config.setBoolean(DFS_CBLOCK_TRACE_IO, true);
config.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, true);
cluster = new MiniOzoneClassicCluster.Builder(config)
.numDataNodes(1)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
storageContainerLocationClient = cluster
.createStorageContainerLocationClient();
xceiverClientManager = new XceiverClientManager(config);
}
@AfterClass
public static void shutdown() throws InterruptedException {
if (cluster != null) {
cluster.shutdown();
}
IOUtils.cleanupWithLogger(null, storageContainerLocationClient, cluster);
}
/**
* getContainerPipeline creates a set of containers and returns the
* Pipelines that define those containers.
*
* @param count - Number of containers to create.
* @return - List of Pipelines.
* @throws IOException on failure
*/
private List<Pipeline> getContainerPipeline(int count) throws IOException {
List<Pipeline> containerPipelines = new LinkedList<>();
for (int x = 0; x < count; x++) {
String traceID = "trace" + RandomStringUtils.randomNumeric(4);
String containerName = "container" + RandomStringUtils.randomNumeric(10);
Pipeline pipeline =
storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerName, "CBLOCK");
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
ContainerProtocolCalls.createContainer(client, traceID);
// This step is needed since we set private data on pipelines when we
// read the list from the CBlock server, so we mimic that action here.
pipeline.setData(Longs.toByteArray(x));
containerPipelines.add(pipeline);
xceiverClientManager.releaseClient(client);
}
return containerPipelines;
}
/**
* This test creates a cache and performs a simple write / read.
* Due to the cache, we have read-after-write consistency for cBlocks.
*
* @throws IOException on failure
*/
@Test
public void testCacheWriteRead() throws IOException,
InterruptedException, TimeoutException {
final long blockID = 0;
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
String dataHash = DigestUtils.sha256Hex(data);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(this.config)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
cache.put(blockID, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(1, metrics.getNumWriteOps());
// Please note that this read is from the local cache.
LogicalBlock block = cache.get(blockID);
Assert.assertEquals(1, metrics.getNumReadOps());
Assert.assertEquals(1, metrics.getNumReadCacheHits());
Assert.assertEquals(0, metrics.getNumReadCacheMiss());
Assert.assertEquals(0, metrics.getNumReadLostBlocks());
cache.put(blockID + 1, data.getBytes(StandardCharsets.UTF_8));
Assert.assertEquals(2, metrics.getNumWriteOps());
// Please note that this read is from the local cache.
block = cache.get(blockID + 1);
Assert.assertEquals(2, metrics.getNumReadOps());
Assert.assertEquals(2, metrics.getNumReadCacheHits());
Assert.assertEquals(0, metrics.getNumReadCacheMiss());
Assert.assertEquals(0, metrics.getNumReadLostBlocks());
String readHash = DigestUtils.sha256Hex(block.getData().array());
Assert.assertEquals("File content does not match.", dataHash, readHash);
GenericTestUtils.waitFor(() -> !cache.isDirtyCache(), 100, 20 * 1000);
cache.close();
}
@Test
public void testCacheWriteToRemoteContainer() throws IOException,
InterruptedException, TimeoutException {
final long blockID = 0;
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(this.config)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
cache.put(blockID, data.getBytes(StandardCharsets.UTF_8));
GenericTestUtils.waitFor(() -> !cache.isDirtyCache(), 100, 20 * 1000);
cache.close();
}
@Test
public void testCacheWriteToRemote50KBlocks() throws IOException,
InterruptedException, TimeoutException {
final long totalBlocks = 50 * 1000;
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
String data = RandomStringUtils.random(4 * KB);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(this.config)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * 1024)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
long startTime = Time.monotonicNow();
for (long blockid = 0; blockid < totalBlocks; blockid++) {
cache.put(blockid, data.getBytes(StandardCharsets.UTF_8));
}
Assert.assertEquals(totalBlocks, metrics.getNumWriteOps());
Assert.assertEquals(totalBlocks, metrics.getNumBlockBufferUpdates());
LOG.info("Wrote 50K blocks, waiting for replication to finish.");
GenericTestUtils.waitFor(() -> !cache.isDirtyCache(), 100, 20 * 1000);
long endTime = Time.monotonicNow();
LOG.info("Time taken for writing {} blocks is {} seconds", totalBlocks,
TimeUnit.MILLISECONDS.toSeconds(endTime - startTime));
// TODO: Read this data back.
cache.close();
}
@Test
public void testCacheInvalidBlock() throws IOException {
final int blockID = 1024;
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, metrics);
CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(this.config)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
// Read a non-existent block ID.
LogicalBlock block = cache.get(blockID);
Assert.assertNotNull(block);
Assert.assertEquals(4 * 1024, block.getData().array().length);
Assert.assertEquals(1, metrics.getNumReadOps());
Assert.assertEquals(1, metrics.getNumReadLostBlocks());
Assert.assertEquals(1, metrics.getNumReadCacheMiss());
cache.close();
}
@Test
public void testReadWriteCorrectness() throws IOException,
InterruptedException, TimeoutException {
Random r = new Random();
final int maxBlock = 12500000;
final int blockCount = 10 * 1000;
Map<Long, String> blockShaMap = new HashMap<>();
List<Pipeline> pipelines = getContainerPipeline(10);
String volumeName = "volume" + RandomStringUtils.randomNumeric(4);
String userName = "user" + RandomStringUtils.randomNumeric(4);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, metrics);
final CBlockLocalCache cache = CBlockLocalCache.newBuilder()
.setConfiguration(this.config)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(pipelines)
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
cache.start();
for (int x = 0; x < blockCount; x++) {
String data = RandomStringUtils.random(4 * 1024);
String dataHash = DigestUtils.sha256Hex(data);
long blockId = abs(r.nextInt(maxBlock));
blockShaMap.put(blockId, dataHash);
cache.put(blockId, data.getBytes(StandardCharsets.UTF_8));
}
Assert.assertEquals(blockCount, metrics.getNumWriteOps());
GenericTestUtils.waitFor(() -> !cache.isDirtyCache(), 100, 20 * 1000);
LOG.info("Finished with putting blocks ..starting reading blocks back. " +
"unique blocks : {}", blockShaMap.size());
// Test reading from local cache.
for (Map.Entry<Long, String> entry : blockShaMap.entrySet()) {
LogicalBlock block = cache.get(entry.getKey());
String blockSha = DigestUtils.sha256Hex(block.getData().array());
Assert.assertEquals("Block data is not equal", entry.getValue(),
blockSha);
}
Assert.assertEquals(blockShaMap.size(), metrics.getNumReadOps());
Assert.assertEquals(blockShaMap.size(), metrics.getNumReadCacheHits());
Assert.assertEquals(0, metrics.getNumReadCacheMiss());
Assert.assertEquals(0, metrics.getNumReadLostBlocks());
LOG.info("Finished with reading blocks, SUCCESS.");
// Close and discard local cache.
cache.close();
LOG.info("Closing the and destroying local cache");
CBlockTargetMetrics newMetrics = CBlockTargetMetrics.create();
ContainerCacheFlusher newflusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, newMetrics);
Assert.assertEquals(0, newMetrics.getNumReadCacheHits());
CBlockLocalCache newCache = null;
try {
newCache = CBlockLocalCache.newBuilder()
.setConfiguration(this.config)
.setVolumeName(volumeName)
.setUserName(userName)
.setPipelines(pipelines)
.setClientManager(xceiverClientManager)
.setBlockSize(4 * KB)
.setVolumeSize(50 * GB)
.setFlusher(newflusher)
.setCBlockTargetMetrics(newMetrics)
.build();
newCache.start();
for (Map.Entry<Long, String> entry : blockShaMap.entrySet()) {
LogicalBlock block = newCache.get(entry.getKey());
String blockSha = DigestUtils.sha256Hex(block.getData().array());
Assert.assertEquals("Block data is not equal", entry.getValue(),
blockSha);
}
Assert.assertEquals(blockShaMap.size(), newMetrics.getNumReadOps());
Assert.assertEquals(blockShaMap.size(), newMetrics.getNumReadCacheHits());
Assert.assertEquals(0, newMetrics.getNumReadCacheMiss());
Assert.assertEquals(0, newMetrics.getNumReadLostBlocks());
LOG.info("Finished with reading blocks from remote cache, SUCCESS.");
} finally {
if (newCache != null) {
newCache.close();
}
}
}
@Test
public void testStorageImplReadWrite() throws IOException,
InterruptedException, TimeoutException {
String userName = "user" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 50L * (1024L * 1024L * 1024L);
int blockSize = 4096;
byte[] data =
RandomStringUtils.randomAlphanumeric(10 * (1024 * 1024))
.getBytes(StandardCharsets.UTF_8);
String hash = DigestUtils.sha256Hex(data);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(this.config,
xceiverClientManager, metrics);
CBlockIStorageImpl ozoneStore = CBlockIStorageImpl.newBuilder()
.setUserName(userName)
.setVolumeName(volumeName)
.setVolumeSize(volumeSize)
.setBlockSize(blockSize)
.setContainerList(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setConf(this.config)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
ozoneStore.write(data, 0);
byte[] newData = new byte[10 * 1024 * 1024];
ozoneStore.read(newData, 0);
String newHash = DigestUtils.sha256Hex(newData);
Assert.assertEquals("hashes don't match.", hash, newHash);
GenericTestUtils.waitFor(() -> !ozoneStore.getCache().isDirtyCache(),
100, 20 * 1000);
ozoneStore.close();
}
//@Test
// Disabling this test for the time being since a bug in jSCSI
// forces us to always have a local cache.
public void testStorageImplNoLocalCache() throws IOException,
InterruptedException, TimeoutException {
OzoneConfiguration oConfig = new OzoneConfiguration();
oConfig.setBoolean(DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO, false);
oConfig.setBoolean(DFS_CBLOCK_TRACE_IO, true);
String userName = "user" + RandomStringUtils.randomNumeric(5);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
long volumeSize = 50L * (1024L * 1024L * 1024L);
int blockSize = 4096;
byte[] data =
RandomStringUtils.randomAlphanumeric(10 * (1024 * 1024))
.getBytes(StandardCharsets.UTF_8);
String hash = DigestUtils.sha256Hex(data);
CBlockTargetMetrics metrics = CBlockTargetMetrics.create();
ContainerCacheFlusher flusher = new ContainerCacheFlusher(oConfig,
xceiverClientManager, metrics);
CBlockIStorageImpl ozoneStore = CBlockIStorageImpl.newBuilder()
.setUserName(userName)
.setVolumeName(volumeName)
.setVolumeSize(volumeSize)
.setBlockSize(blockSize)
.setContainerList(getContainerPipeline(10))
.setClientManager(xceiverClientManager)
.setConf(oConfig)
.setFlusher(flusher)
.setCBlockTargetMetrics(metrics)
.build();
ozoneStore.write(data, 0);
byte[] newData = new byte[10 * 1024 * 1024];
ozoneStore.read(newData, 0);
String newHash = DigestUtils.sha256Hex(newData);
Assert.assertEquals("hashes don't match.", hash, newHash);
GenericTestUtils.waitFor(() -> !ozoneStore.getCache().isDirtyCache(),
100, 20 * 1000);
ozoneStore.close();
}
}

View File

@ -1,74 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.kubernetes;
import io.kubernetes.client.JSON;
import io.kubernetes.client.models.V1PersistentVolume;
import io.kubernetes.client.models.V1PersistentVolumeClaim;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_ISCSI_ADVERTISED_IP;
import org.junit.Assert;
import org.junit.Test;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
/**
* Test the resource generation of Dynamic Provisioner.
*/
public class TestDynamicProvisioner {
@Test
public void persitenceVolumeBuilder() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setStrings(DFS_CBLOCK_ISCSI_ADVERTISED_IP, "1.2.3.4");
DynamicProvisioner provisioner =
new DynamicProvisioner(conf, null);
String pvc = new String(Files.readAllBytes(
Paths.get(getClass().getResource(
"/dynamicprovisioner/input1-pvc.json").toURI())));
String pv = new String(Files.readAllBytes(
Paths.get(getClass().getResource(
"/dynamicprovisioner/expected1-pv.json").toURI())));
JSON json = new io.kubernetes.client.JSON();
V1PersistentVolumeClaim claim =
json.getGson().fromJson(pvc, V1PersistentVolumeClaim.class);
String volumeName = provisioner.createVolumeName(claim);
V1PersistentVolume volume =
provisioner.persitenceVolumeBuilder(claim, volumeName);
// Remove the data that should not be compared.
V1PersistentVolume expectedVolume =
json.getGson().fromJson(pv, V1PersistentVolume.class);
Assert.assertEquals(expectedVolume, volume);
}
}

View File

@ -1,73 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.util;
import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
/**
* NOTE : This class is only for testing purpose.
*
* Mocks an underlying container storage layer that is exposed to CBlock
* for performing IO. In this mock implementation, a container is nothing
* more than an in-memory hashmap.
*
* This is to allow volume creation calls and standalone tests.
*/
public final class ContainerLookUpService {
private static ConcurrentHashMap<String, ContainerDescriptor>
containers = new ConcurrentHashMap<>();
/**
* Return an *existing* container with the given id.
*
* TODO : for testing purposes, return a new container if the given id
* is not found.
*
* @param containerID the id of the container to look up
* @return the corresponding container descriptor (created if absent)
*/
public static ContainerDescriptor lookUp(String containerID)
throws IOException {
if (!containers.containsKey(containerID)) {
Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
containerID);
ContainerDescriptor cd = new ContainerDescriptor(containerID);
cd.setPipeline(pipeline);
containers.put(containerID, cd);
}
return containers.get(containerID);
}
public static void addContainer(String containerID) throws IOException {
Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
containerID);
ContainerDescriptor cd = new ContainerDescriptor(containerID);
cd.setPipeline(pipeline);
containers.put(containerID, cd);
}
private ContainerLookUpService() {
}
}
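
For illustration, a minimal sketch of how a test can drive this mock. It assumes the same imports as the class above and a surrounding test method that declares `throws IOException`:

```
// Illustrative sketch only: exercise the in-memory container map.
ContainerLookUpService.addContainer("container-0");
ContainerDescriptor descriptor = ContainerLookUpService.lookUp("container-0");
Pipeline pipeline = descriptor.getPipeline();  // single-node test pipeline
// lookUp creates missing containers on the fly, per the TODO above.
ContainerDescriptor created = ContainerLookUpService.lookUp("container-1");
```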

View File

@ -1,176 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.util;
import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
* This class is the one that directly talks to the SCM server.
*
* NOTE : this is only a mock class, intended to allow testing volume
* creation without actually creating containers. In the real world it
* needs to be replaced with actual container lookup calls.
*
*/
public class MockStorageClient implements ScmClient {
private static AtomicInteger currentContainerId =
new AtomicInteger(0);
/**
* Ask SCM for an exclusive container.
*
* @return a pipeline object used to locate this container
* @throws IOException on failure
*/
@Override
public Pipeline createContainer(String containerId, String owner)
throws IOException {
int contId = currentContainerId.getAndIncrement();
ContainerLookUpService.addContainer(Long.toString(contId));
return ContainerLookUpService.lookUp(Long.toString(contId))
.getPipeline();
}
/**
* As this is only a testing class, with all "containers" maintained in
* memory, there is no need to really delete anything for now.
* @throws IOException
*/
@Override
public void deleteContainer(Pipeline pipeline, boolean force)
throws IOException {
}
/**
* This is a mock class, so it simply returns the container info of the
* start container.
*
* @param startName start container name.
* @param prefixName prefix container name.
* @param count count.
* @return a list of pipeline.
* @throws IOException
*/
@Override
public List<ContainerInfo> listContainer(String startName,
String prefixName, int count) throws IOException {
List<ContainerInfo> containerList = new ArrayList<>();
ContainerDescriptor containerDescriptor =
ContainerLookUpService.lookUp(startName);
ContainerInfo container = new ContainerInfo.Builder()
.setContainerName(containerDescriptor.getContainerID())
.setPipeline(containerDescriptor.getPipeline())
.setState(HddsProtos.LifeCycleState.ALLOCATED)
.build();
containerList.add(container);
return containerList;
}
/**
* Create an instance of ContainerData for a given container id. Since
* this is a testing class, there is no need to set up the whole
* environment to get the metadata of the container.
* @param pipeline
* @return
* @throws IOException
*/
@Override
public ContainerData readContainer(Pipeline pipeline) throws IOException {
return ContainerData.newBuilder()
.setName(pipeline.getContainerName())
.build();
}
/**
* Return a reference to an *existing* container with the given ID.
*
* @param containerId
* @return
* @throws IOException
*/
public Pipeline getContainer(String containerId)
throws IOException {
return ContainerLookUpService.lookUp(containerId).getPipeline();
}
@Override
public void closeContainer(Pipeline container) throws IOException {
// Do nothing, because the mock container does not have the notion of
// "open" and "close".
}
@Override
public long getContainerSize(Pipeline pipeline) throws IOException {
// just return a constant value for now
return 5L * OzoneConsts.GB; // 5GB
}
@Override
public Pipeline createContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor replicationFactor, String containerId,
String owner) throws IOException {
int contId = currentContainerId.getAndIncrement();
ContainerLookUpService.addContainer(Long.toString(contId));
return ContainerLookUpService.lookUp(Long.toString(contId))
.getPipeline();
}
/**
* Returns a set of Nodes that meet a query criteria.
*
* @param nodeStatuses - A set of criteria that we want the node to have.
* @param queryScope - Query scope - Cluster or pool.
* @param poolName - if it is pool, a pool name is required.
* @return A set of nodes that meet the requested criteria.
* @throws IOException
*/
@Override
public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException {
return null;
}
/**
* Creates a specified replication pipeline.
*
* @param type - Type
* @param factor - Replication factor
* @param nodePool - Set of machines.
* @throws IOException
*/
@Override
public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException {
return null;
}
}
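
A hedged sketch of how code under test can consume this mock through the ScmClient interface; the variable names are illustrative and exception handling is omitted:

```
// Illustrative sketch only: the mock hands out monotonically increasing
// container ids and reports a constant size for every container.
ScmClient scm = new MockStorageClient();
Pipeline pipeline = scm.createContainer("any-id", "CBLOCK");
long size = scm.getContainerSize(pipeline);  // always 5 GB in this mock
scm.deleteContainer(pipeline, true);         // no-op in this mock
```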

View File

@ -1,54 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
{
"apiVersion": "v1",
"kind": "PersistentVolume",
"metadata": {
"annotations": {
"volume.beta.kubernetes.io/storage-class": "cblock",
"pv.kubernetes.io/provisioned-by": "hadoop.apache.org/cblock"
},
"name": "volume1-b65d053d-f92e-11e7-be3b-84b261c34638",
"namespace": "ns"
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"capacity": {
"storage": "1Gi"
},
"claimRef": {
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"name": "volume1",
"namespace": "ns",
"uid": "b65d053d-f92e-11e7-be3b-84b261c34638"
},
"iscsi": {
"fsType": "ext4",
"iqn": "iqn.2001-04.org.apache.hadoop:volume1-b65d053d-f92e-11e7-be3b-84b261c34638",
"lun": 0,
"portals": [
"1.2.3.4:3260"
],
"targetPortal": "1.2.3.4:3260"
},
"persistentVolumeReclaimPolicy": "Delete"
}
}

View File

@ -1,55 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
{
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
"annotations": {
"pv.kubernetes.io/bind-completed": "yes",
"pv.kubernetes.io/bound-by-controller": "yes",
"volume.beta.kubernetes.io/storage-provisioner": "hadoop.apache.org/cblock"
},
"creationTimestamp": "2018-01-14T13:27:48Z",
"name": "volume1",
"namespace": "ns",
"resourceVersion": "5532691",
"selfLink": "/api/v1/namespaces/demo1/persistentvolumeclaims/persistent",
"uid": "b65d053d-f92e-11e7-be3b-84b261c34638"
},
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "1Gi"
}
},
"storageClassName": "cblock",
"volumeName": "persistent-b65d053d-f92e-11e7-be3b-84b261c34638"
},
"status": {
"accessModes": [
"ReadWriteOnce"
],
"capacity": {
"storage": "1Gi"
},
"phase": "Bound"
}
}

View File

@ -1,42 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock</artifactId>
<version>3.2.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-cblock-tools</artifactId>
<version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop CBlock Tools</description>
<name>Apache Hadoop CBlock Tools</name>
<packaging>jar</packaging>
<properties>
<hadoop.component>cblock</hadoop.component>
<is.hadoop.component>true</is.hadoop.component>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock-server</artifactId>
</dependency>
</dependencies>
</project>

View File

@ -1,265 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.cli;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.cblock.CblockUtils;
import org.apache.hadoop.cblock.client.CBlockVolumeClient;
import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.List;
/**
* The command line tool class.
*/
public class CBlockCli extends Configured implements Tool {
private static final String CREATE_VOLUME = "createVolume";
private static final String DELETE_VOLUME = "deleteVolume";
private static final String INFO_VOLUME = "infoVolume";
private static final String LIST_VOLUME = "listVolume";
private static final String SERVER_ADDR = "serverAddr";
private static final String HELP = "help";
private static final Logger LOG =
LoggerFactory.getLogger(CBlockCli.class);
private OzoneConfiguration conf;
private PrintStream printStream;
private Options options;
private BasicParser parser;
private CBlockVolumeClient localProxy;
public CBlockCli(OzoneConfiguration conf, PrintStream printStream)
throws IOException {
this.printStream = printStream;
this.conf = conf;
this.options = getOptions();
this.parser = new BasicParser();
}
public CBlockCli(OzoneConfiguration conf) throws IOException{
this(conf, System.out);
}
private CommandLine parseArgs(String[] argv)
throws ParseException {
return parser.parse(options, argv);
}
private static Options getOptions() {
Options options = new Options();
Option serverAddress = OptionBuilder
.withArgName("serverAddress>:<serverPort")
.withLongOpt(SERVER_ADDR)
.withValueSeparator(':')
.hasArgs(2)
.withDescription("specify server address:port")
.create("s");
options.addOption(serverAddress);
// taking 4 args: userName, volumeName, volumeSize, blockSize
Option createVolume = OptionBuilder
.withArgName("user> <volume> <volumeSize in [GB/TB]> <blockSize")
.withLongOpt(CREATE_VOLUME)
.withValueSeparator(' ')
.hasArgs(4)
.withDescription("create a fresh new volume")
.create("c");
options.addOption(createVolume);
// taking 2 args: userName, volumeName
Option deleteVolume = OptionBuilder
.withArgName("user> <volume")
.withLongOpt(DELETE_VOLUME)
.hasArgs(2)
.withDescription("delete a volume")
.create("d");
options.addOption(deleteVolume);
// taking 2 args: userName, volumeName
Option infoVolume = OptionBuilder
.withArgName("user> <volume")
.withLongOpt(INFO_VOLUME)
.hasArgs(2)
.withDescription("info a volume")
.create("i");
options.addOption(infoVolume);
// taking 1 arg: userName
Option listVolume = OptionBuilder
.withArgName("user")
.withLongOpt(LIST_VOLUME)
.hasOptionalArgs(1)
.withDescription("list all volumes")
.create("l");
options.addOption(listVolume);
Option help = OptionBuilder
.withLongOpt(HELP)
.withDescription("help")
.create("h");
options.addOption(help);
return options;
}
@Override
public int run(String[] args) throws ParseException, IOException {
CommandLine commandLine = parseArgs(args);
if (commandLine.hasOption("s")) {
String[] serverAddrArgs = commandLine.getOptionValues("s");
LOG.info("server address" + Arrays.toString(serverAddrArgs));
String serverHost = serverAddrArgs[0];
int serverPort = Integer.parseInt(serverAddrArgs[1]);
InetSocketAddress serverAddress =
new InetSocketAddress(serverHost, serverPort);
this.localProxy = new CBlockVolumeClient(conf, serverAddress);
} else {
this.localProxy = new CBlockVolumeClient(conf);
}
if (commandLine.hasOption("h")) {
LOG.info("help");
help();
}
if (commandLine.hasOption("c")) {
String[] createArgs = commandLine.getOptionValues("c");
LOG.info("create volume:" + Arrays.toString(createArgs));
createVolume(createArgs);
}
if (commandLine.hasOption("d")) {
String[] deleteArgs = commandLine.getOptionValues("d");
LOG.info("delete args:" + Arrays.toString(deleteArgs));
deleteVolume(deleteArgs);
}
if (commandLine.hasOption("l")) {
String[] listArg = commandLine.getOptionValues("l");
LOG.info("list args:" + Arrays.toString(listArg));
listVolume(listArg);
}
if (commandLine.hasOption("i")) {
String[] infoArgs = commandLine.getOptionValues("i");
LOG.info("info args:" + Arrays.toString(infoArgs));
infoVolume(infoArgs);
}
return 0;
}
public static void main(String[] argv) throws Exception {
CblockUtils.activateConfigs();
OzoneConfiguration cblockConf = new OzoneConfiguration();
RPC.setProtocolEngine(cblockConf, CBlockServiceProtocolPB.class,
ProtobufRpcEngine.class);
int res = 0;
Tool shell = new CBlockCli(cblockConf, System.out);
try {
ToolRunner.run(shell, argv);
} catch (Exception ex) {
LOG.error(ex.toString());
res = 1;
}
System.exit(res);
}
private void createVolume(String[] createArgs) throws IOException {
String userName = createArgs[0];
String volumeName = createArgs[1];
long volumeSize = CblockUtils.parseSize(createArgs[2]);
int blockSize = Integer.parseInt(createArgs[3])*1024;
localProxy.createVolume(userName, volumeName, volumeSize, blockSize);
}
private void deleteVolume(String[] deleteArgs) throws IOException {
String userName = deleteArgs[0];
String volumeName = deleteArgs[1];
boolean force = false;
if (deleteArgs.length > 2) {
force = Boolean.parseBoolean(deleteArgs[2]);
}
localProxy.deleteVolume(userName, volumeName, force);
}
private void infoVolume(String[] infoArgs) throws IOException {
String userName = infoArgs[0];
String volumeName = infoArgs[1];
VolumeInfo volumeInfo = localProxy.infoVolume(userName, volumeName);
printStream.println(volumeInfo.toString());
}
private void listVolume(String[] listArgs) throws IOException {
StringBuilder stringBuilder = new StringBuilder();
List<VolumeInfo> volumeResponse;
if (listArgs == null) {
volumeResponse = localProxy.listVolume(null);
} else {
volumeResponse = localProxy.listVolume(listArgs[0]);
}
for (int i = 0; i<volumeResponse.size(); i++) {
stringBuilder.append(
String.format("%s:%s\t%d\t%d", volumeResponse.get(i).getUserName(),
volumeResponse.get(i).getVolumeName(),
volumeResponse.get(i).getVolumeSize(),
volumeResponse.get(i).getBlockSize()));
if (i < volumeResponse.size() - 1) {
stringBuilder.append("\n");
}
}
printStream.println(stringBuilder);
}
private void help() {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp(100, "cblock", "", options, "");
}
}
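
As a usage sketch, the CLI can also be driven programmatically; the argument values here are hypothetical but follow the option shapes defined in getOptions() and mirror TestCBlockCLI further below. Exception handling is omitted:

```
// Illustrative sketch only: create, list, then delete a volume.
// Assumes a running cblock server reachable via the configuration.
OzoneConfiguration conf = new OzoneConfiguration();
CBlockCli cli = new CBlockCli(conf, System.out);
cli.run(new String[] {"-c", "bilbo", "volume1", "1GB", "4"});  // createVolume
cli.run(new String[] {"-l", "bilbo"});                         // listVolume
cli.run(new String[] {"-d", "bilbo", "volume1"});              // deleteVolume
```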

View File

@ -1,18 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock.cli;

View File

@ -1,242 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cblock;
import org.apache.hadoop.cblock.cli.CBlockCli;
import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.cblock.util.MockStorageClient;
import org.apache.hadoop.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.List;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_ADDRESS_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* A testing class for cblock command line tool.
*/
public class TestCBlockCLI {
private static final long GB = 1 * 1024 * 1024 * 1024L;
private static final int KB = 1024;
private static CBlockCli cmd;
private static OzoneConfiguration conf;
private static CBlockManager cBlockManager;
private static ByteArrayOutputStream outContent;
private static PrintStream testPrintOut;
@BeforeClass
public static void setup() throws IOException {
outContent = new ByteArrayOutputStream();
ScmClient storageClient = new MockStorageClient();
conf = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestCBlockCLI.class.getSimpleName());
File filePath = new File(path);
if (!filePath.exists() && !filePath.mkdirs()) {
throw new IOException("Unable to create test DB dir");
}
conf.set(DFS_CBLOCK_SERVICERPC_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY, path.concat(
"/testCblockCli.dat"));
cBlockManager = new CBlockManager(conf, storageClient);
cBlockManager.start();
testPrintOut = new PrintStream(outContent);
cmd = new CBlockCli(conf, testPrintOut);
}
@AfterClass
public static void clean() {
if (cBlockManager != null) {
cBlockManager.stop();
cBlockManager.join();
cBlockManager.clean();
}
}
@After
public void reset() {
outContent.reset();
}
/**
* Test the help command.
* @throws Exception
*/
@Test
public void testCliHelp() throws Exception {
PrintStream initialStdOut = System.out;
System.setOut(testPrintOut);
String[] args = {"-h"};
cmd.run(args);
String helpPrints =
"usage: cblock\n" +
" -c,--createVolume <user> <volume> <volumeSize in [GB/TB]> " +
"<blockSize> create a fresh new volume\n" +
" -d,--deleteVolume <user> <volume> " +
" delete a volume\n" +
" -h,--help " +
" help\n" +
" -i,--infoVolume <user> <volume> " +
" info a volume\n" +
" -l,--listVolume <user> " +
" list all volumes\n" +
" -s,--serverAddr <serverAddress>:<serverPort> " +
" specify server address:port\n";
assertEquals(helpPrints, outContent.toString());
outContent.reset();
System.setOut(initialStdOut);
}
/**
* Test volume listing command.
* @throws Exception
*/
@Test
public void testCliList() throws Exception {
String userName0 = "userTestCliList0";
String userName1 = "userTestCliList1";
String userTestNotExist = "userTestNotExist";
String volumeName0 = "volumeTest0";
String volumeName1 = "volumeTest1";
String volumeSize0 = "30GB";
String volumeSize1 = "40GB";
String blockSize = Integer.toString(4);
String[] argsCreate0 =
{"-c", userName0, volumeName0, volumeSize0, blockSize};
cmd.run(argsCreate0);
String[] argsCreate1 =
{"-c", userName0, volumeName1, volumeSize1, blockSize};
cmd.run(argsCreate1);
String[] argsCreate2 =
{"-c", userName1, volumeName0, volumeSize0, blockSize};
cmd.run(argsCreate2);
String[] argsList0 = {"-l"};
cmd.run(argsList0);
String[] outExpected1 = {
"userTestCliList1:volumeTest0\t32212254720\t4096\n",
"userTestCliList0:volumeTest0\t32212254720\t4096\n",
"userTestCliList0:volumeTest1\t42949672960\t4096\n"};
int length = 0;
for (String str : outExpected1) {
assertTrue(outContent.toString().contains(str));
length += str.length();
}
assertEquals(length, outContent.toString().length());
outContent.reset();
String[] argsList1 = {"-l", userName1};
cmd.run(argsList1);
String outExpected2 = "userTestCliList1:volumeTest0\t32212254720\t4096\n";
assertEquals(outExpected2, outContent.toString());
outContent.reset();
String[] argsList2 = {"-l", userTestNotExist};
cmd.run(argsList2);
String outExpected3 = "\n";
assertEquals(outExpected3, outContent.toString());
}
/**
* Test create volume command.
* @throws Exception
*/
@Test
public void testCliCreate() throws Exception {
String userName = "userTestCliCreate";
String volumeName = "volumeTest";
String volumeSize = "30GB";
String blockSize = "4";
String[] argsCreate = {"-c", userName, volumeName, volumeSize, blockSize};
cmd.run(argsCreate);
List<VolumeDescriptor> allVolumes = cBlockManager.getAllVolumes(userName);
assertEquals(1, allVolumes.size());
VolumeDescriptor volume = allVolumes.get(0);
assertEquals(userName, volume.getUserName());
assertEquals(volumeName, volume.getVolumeName());
long volumeSizeB = volume.getVolumeSize();
assertEquals(30, (int)(volumeSizeB/ GB));
assertEquals(4, volume.getBlockSize()/ KB);
}
/**
* Test delete volume command.
* @throws Exception
*/
@Test
public void testCliDelete() throws Exception {
String userName = "userTestCliDelete";
String volumeName = "volumeTest";
String volumeSize = "30GB";
String blockSize = "4";
String[] argsCreate = {"-c", userName, volumeName, volumeSize, blockSize};
cmd.run(argsCreate);
List<VolumeDescriptor> allVolumes = cBlockManager.getAllVolumes(userName);
assertEquals(1, allVolumes.size());
VolumeDescriptor volume = allVolumes.get(0);
assertEquals(userName, volume.getUserName());
assertEquals(volumeName, volume.getVolumeName());
long volumeSizeB = volume.getVolumeSize();
assertEquals(30, (int)(volumeSizeB/ GB));
assertEquals(4, volume.getBlockSize()/ KB);
String[] argsDelete = {"-d", userName, volumeName};
cmd.run(argsDelete);
allVolumes = cBlockManager.getAllVolumes(userName);
assertEquals(0, allVolumes.size());
}
/**
* Test info volume command.
* @throws Exception
*/
@Test
public void testCliInfoVolume() throws Exception {
String userName0 = "userTestCliInfo";
String volumeName0 = "volumeTest0";
String volumeSize = "8000GB";
String blockSize = "4";
String[] argsCreate0 = {
"-c", userName0, volumeName0, volumeSize, blockSize};
cmd.run(argsCreate0);
String[] argsInfo = {"-i", userName0, volumeName0};
cmd.run(argsInfo);
// TODO : the usage field is not implemented yet, always 0 now.
String outExpected = " userName:userTestCliInfo " +
"volumeName:volumeTest0 " +
"volumeSize:8589934592000 " +
"blockSize:4096 (sizeInBlocks:2097152000) usageInBlocks:0\n";
assertEquals(outExpected, outContent.toString());
}
}

View File

@ -600,8 +600,6 @@ function hadoop_bootstrap
HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
CBLOCK_DIR=${CBLOCK_DIR:-"share/hadoop/cblock"}
CBLOCK_LIB_JARS_DIR=${CBLOCK_LIB_JARS_DIR:-"share/hadoop/cblock/lib"}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}

View File

@ -237,10 +237,6 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-tools</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock-server</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-container-service</artifactId>
@ -257,10 +253,6 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-tools</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock-tools</artifactId>
</dependency>
</dependencies>
<build>
<plugins>

View File

@ -1,17 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION=${project.version}

View File

@ -1,42 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# CBlock dozone configuration
This directory contains an example cluster definition for CBlock/jSCSI servers.
## How to use
1. Start the servers with `docker-compose up -d`.
2. Wait until the servers are up and running (check http://localhost:9876 and wait until you have a healthy node)
3. Create a volume: `docker-compose exec cblock hdfs cblock -c bilbo volume2 1GB 4`
4. Mount the iscsi volume (from host machine):
```
sudo iscsiadm -m node -o new -T bilbo:volume2 -p 127.0.0.1
sudo iscsiadm -m node -T bilbo:volume2 --login
```
5. Check the device name from `dmesg` or `lsblk` (e.g. /dev/sdd). Errors in dmesg can be ignored: jSCSI does not implement all of the SCSI commands.
6. Format the device (`mkfs.ext4 /dev/sdd`). (Yes, format the whole device, not just a partition.)
7. Mount it (`mount /dev/sdd /mnt/target`).
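8. To detach the volume later, log out from the iSCSI session (standard open-iscsi usage, not CBlock specific): `sudo iscsiadm -m node -T bilbo:volume2 --logout`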

View File

@ -1,66 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
- 9870:9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
- 9864
command: ["/opt/hadoop/bin/ozone","datanode"]
env_file:
- ./docker-config
jscsi:
image: apache/hadoop-runner
ports:
- 3260:3260
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","jscsi"]
cblock:
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","cblockserver"]
scm:
image: apache/hadoop-runner
volumes:
- ../..//hadoop-${VERSION}:/opt/hadoop
ports:
- 9876:9876
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","scm"]
environment:
ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION

View File

@ -1,39 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
LOG4J.PROPERTIES_log4j.rootLogger=info,stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
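Note: each `FILE.XML_key=value` entry above follows the apache/hadoop-runner convention; the image's startup script is expected to rewrite such prefixed environment variables into the corresponding configuration file (e.g. `ozone-site.xml`) before the daemons start.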

View File

@ -33,8 +33,6 @@ function hadoop_usage
hadoop_add_option "--workers" "turn on worker mode"
hadoop_add_subcommand "cblock" admin "cblock CLI"
hadoop_add_subcommand "cblockserver" daemon "run cblock server"
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
@ -43,7 +41,6 @@ function hadoop_usage
hadoop_add_subcommand "getozoneconf" client "get ozone config values from
configuration"
hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
hadoop_add_subcommand "o3" client "command line interface for ozone"
hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
@ -65,13 +62,6 @@ function ozonecmd_case
shift
case ${subcmd} in
cblock)
HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
;;
cblockserver)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME=org.apache.hadoop.cblock.CBlockManager
;;
classpath)
hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
;;
@ -106,10 +96,6 @@ function ozonecmd_case
getozoneconf)
HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
;;
jscsi)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME=org.apache.hadoop.cblock.jscsiHelper.SCSITargetDaemon
;;
ksm)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager

View File

@ -19,7 +19,7 @@ if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then
fi
## @description Profile for hdds/cblock/ozone components.
## @description Profile for hdds/ozone components.
## @audience private
## @stability evolving
function _ozone_hadoop_classpath
@ -40,7 +40,5 @@ function _ozone_hadoop_classpath
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDDS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${OZONE_LIB_JARS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${OZONE_DIR}"'/*'
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${CBLOCK_LIB_JARS_DIR}"'/*'
hadoop_add_classpath "${HADOOP_HDFS_HOME}/${CBLOCK_DIR}"'/*'
}

View File

@ -583,11 +583,6 @@
<artifactId>hadoop-hdds-tools</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-tools</artifactId>
@ -600,14 +595,6 @@
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cblock-tools</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-server-framework</artifactId>

View File

@ -747,7 +747,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
</activation>
<modules>
<module>hadoop-ozone</module>
<module>hadoop-cblock</module>
<module>hadoop-hdds</module>
<module>hadoop-ozone/acceptance-test</module>
</modules>