HDFS-13405. Ozone: Rename HDSL to HDDS.

Contributed by Ajay Kumar, Elek Marton, Mukul Kumar Singh, Shashikant Banerjee and Anu Engineer.
Authored by Anu Engineer on 2018-04-05 11:24:39 -07:00; committed by Owen O'Malley.
parent 792ac4d08b
commit 8b832f3c35
486 changed files with 3922 additions and 2721 deletions
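
For orientation before the per-file hunks, the core of this rename as it shows up in Java code is summarized below. This is an illustrative mapping assembled from the hunks in this commit, not an additional change; the class name RenameMappingExample is invented purely so the snippet stands alone as a compilation unit.

// Packages: org.apache.hadoop.scm.*  -> org.apache.hadoop.hdds.scm.*
//           org.apache.hadoop.hdsl.* -> org.apache.hadoop.hdds.*
// Protos:   HdslProtos / hdsl.proto  -> HddsProtos / hdds.proto
// Maven:    hadoop-hdsl-<module>     -> hadoop-hdds-<module>
import org.apache.hadoop.hdds.conf.OzoneConfiguration;               // was org.apache.hadoop.hdsl.conf.OzoneConfiguration
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;             // was org.apache.hadoop.hdsl.protocol.proto.HdslProtos
import org.apache.hadoop.hdds.scm.XceiverClientManager;              // was org.apache.hadoop.scm.XceiverClientManager
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; // was org.apache.hadoop.scm.container.common.helpers.Pipeline

/** Illustrative only: placeholder type so the renamed imports above form a valid unit. */
final class RenameMappingExample {
  private RenameMappingExample() { }
}
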

View File

@ -51,7 +51,7 @@
<exclude>**/file:/**</exclude> <exclude>**/file:/**</exclude>
<exclude>**/SecurityAuth.audit*</exclude> <exclude>**/SecurityAuth.audit*</exclude>
<exclude>hadoop-ozone/**</exclude> <exclude>hadoop-ozone/**</exclude>
<exclude>hadoop-hdsl/**</exclude> <exclude>hadoop-hdds/**</exclude>
<exclude>hadoop-cblock/**</exclude> <exclude>hadoop-cblock/**</exclude>
</excludes> </excludes>
</fileSet> </fileSet>

View File

@ -37,7 +37,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-server-framework</artifactId> <artifactId>hadoop-hdds-server-framework</artifactId>
</dependency> </dependency>
<dependency> <dependency>
@ -47,12 +47,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-common</artifactId> <artifactId>hadoop-hdds-common</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-client</artifactId> <artifactId>hadoop-hdds-client</artifactId>
</dependency> </dependency>
<dependency> <dependency>
@ -132,7 +132,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
</param> </param>
<param> <param>
${basedir}/../../hadoop-hdsl/common/src/main/proto/ ${basedir}/../../hadoop-hdds/common/src/main/proto/
</param> </param>
<param>${basedir}/src/main/proto</param> <param>${basedir}/src/main/proto</param>
</imports> </imports>

View File

@ -37,24 +37,24 @@ import org.apache.hadoop.cblock.protocolPB
import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.client.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.cblock.storage.StorageManager; import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.cblock.util.KeyUtil; import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.scm.protocolPB import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB; .StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.utils.LevelDBStore; import org.apache.hadoop.utils.LevelDBStore;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServerRpcAddr; import static org.apache.hadoop.cblock.CblockUtils.getCblockServerRpcAddr;
import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr; import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr;
import static org.apache.hadoop.hdsl.server.ServerUtils import static org.apache.hadoop.hdds.server.ServerUtils
.updateRPCListenAddress; .updateRPCListenAddress;
import org.iq80.leveldb.DBIterator; import org.iq80.leveldb.DBIterator;
import org.slf4j.Logger; import org.slf4j.Logger;

View File

@ -36,8 +36,8 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT; .DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys import static org.apache.hadoop.cblock.CBlockConfigKeys
.DFS_CBLOCK_SERVICERPC_PORT_DEFAULT; .DFS_CBLOCK_SERVICERPC_PORT_DEFAULT;
import static org.apache.hadoop.hdsl.HdslUtils.getHostNameFromConfigKeys; import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdsl.HdslUtils.getPortNumberFromConfigKeys; import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
/** /**
* Generic stateless utility functions for CBlock components. * Generic stateless utility functions for CBlock components.

View File

@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.cblock.CBlockConfigKeys; import org.apache.hadoop.cblock.CBlockConfigKeys;
import org.apache.hadoop.cblock.meta.VolumeInfo; import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB; import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;

View File

@ -21,9 +21,9 @@ import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs; import com.google.common.primitives.Longs;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter;
import org.apache.hadoop.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore; import org.apache.hadoop.utils.LevelDBStore;

View File

@ -39,7 +39,7 @@ import org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB;
import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;

View File

@ -23,8 +23,8 @@ import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.jscsi.target.storage.IStorageModule; import org.jscsi.target.storage.IStorageModule;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;

View File

@ -20,8 +20,8 @@ package org.apache.hadoop.cblock.jscsiHelper;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.util.KeyUtil; import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.jscsi.target.Configuration; import org.jscsi.target.Configuration;
import org.jscsi.target.Target; import org.jscsi.target.Target;
import org.jscsi.target.TargetServer; import org.jscsi.target.TargetServer;

View File

@ -26,8 +26,8 @@ import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.DiskBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.DiskBlock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore; import org.apache.hadoop.utils.LevelDBStore;
import org.iq80.leveldb.Options; import org.iq80.leveldb.Options;

View File

@ -31,9 +31,9 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.client.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.jscsi.target.Configuration; import org.jscsi.target.Configuration;
@ -47,14 +47,14 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_DE
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_KEY; import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_KEY;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT; import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT;
import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY; import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
/** /**
* This class runs the target server process. * This class runs the target server process.

View File

@ -22,10 +22,10 @@ import com.google.common.primitives.Longs;
import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.hadoop.utils.LevelDBStore; import org.apache.hadoop.utils.LevelDBStore;
import org.slf4j.Logger; import org.slf4j.Logger;

View File

@ -24,8 +24,8 @@ import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule; import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.utils.LevelDBStore; import org.apache.hadoop.utils.LevelDBStore;
import org.slf4j.Logger; import org.slf4j.Logger;

View File

@ -22,10 +22,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
import org.apache.hadoop.utils.LevelDBStore; import org.apache.hadoop.utils.LevelDBStore;

View File

@ -38,7 +38,7 @@ import org.apache.hadoop.cblock.CblockUtils;
import org.apache.hadoop.cblock.exception.CBlockException; import org.apache.hadoop.cblock.exception.CBlockException;
import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.storage.StorageManager; import org.apache.hadoop.cblock.storage.StorageManager;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting; import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;

View File

@ -19,7 +19,7 @@ package org.apache.hadoop.cblock.meta;
import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos; import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
/** /**
* *

View File

@ -19,7 +19,7 @@ package org.apache.hadoop.cblock.meta;
import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos; import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;

View File

@ -17,7 +17,7 @@
*/ */
package org.apache.hadoop.cblock.proto; package org.apache.hadoop.cblock.proto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;

View File

@ -25,7 +25,7 @@ import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos; import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos;
import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos; import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;

View File

@ -25,10 +25,10 @@ import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.cblock.meta.VolumeInfo; import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.proto.MountVolumeResponse;
import org.apache.hadoop.cblock.util.KeyUtil; import org.apache.hadoop.cblock.util.KeyUtil;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -187,8 +187,8 @@ public class StorageManager {
ContainerDescriptor container = null; ContainerDescriptor container = null;
try { try {
Pipeline pipeline = storageClient.createContainer( Pipeline pipeline = storageClient.createContainer(
HdslProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationType.STAND_ALONE,
HdslProtos.ReplicationFactor.ONE, HddsProtos.ReplicationFactor.ONE,
KeyUtil.getContainerName(volume.getUserName(), KeyUtil.getContainerName(volume.getUserName(),
volume.getVolumeName(), containerIdx), cblockId); volume.getVolumeName(), containerIdx), cblockId);
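
The createContainer call above keeps its shape and only swaps HdslProtos for HddsProtos. A minimal sketch of such a call through the renamed ScmClient interface follows; the method signature is the one shown in this commit, while the helper class AllocateContainerExample and the caller-supplied containerName/owner values are hypothetical.

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

final class AllocateContainerExample {
  /** Mirrors the StorageManager call above: standalone replication, single replica. */
  static Pipeline allocate(ScmClient scmClient, String containerName, String owner)
      throws IOException {
    return scmClient.createContainer(HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE, containerName, owner);
  }
}
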

View File

@ -27,7 +27,7 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true; option java_generate_equals_and_hash = true;
package hadoop.cblock; package hadoop.cblock;
import "hdsl.proto"; import "hdds.proto";
import "CBlockServiceProtocol.proto"; import "CBlockServiceProtocol.proto";
/** /**
* This message is sent from CBlock client side to CBlock server to * This message is sent from CBlock client side to CBlock server to
@ -69,7 +69,7 @@ message ContainerIDProto {
required string containerID = 1; required string containerID = 1;
required uint64 index = 2; required uint64 index = 2;
// making pipeline optional to be compatible with exisiting tests // making pipeline optional to be compatible with exisiting tests
optional hadoop.hdsl.Pipeline pipeline = 3; optional hadoop.hdds.Pipeline pipeline = 3;
} }

View File

@ -23,13 +23,13 @@ import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneClassicCluster;

View File

@ -24,21 +24,21 @@ import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB; .StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Assert; import org.junit.Assert;

View File

@ -19,8 +19,8 @@ package org.apache.hadoop.cblock;
import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.cblock.meta.VolumeInfo; import org.apache.hadoop.cblock.meta.VolumeInfo;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.cblock.util.MockStorageClient; import org.apache.hadoop.cblock.util.MockStorageClient;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.cblock; package org.apache.hadoop.cblock;
import org.apache.hadoop.cblock.meta.VolumeDescriptor; import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.cblock.util.MockStorageClient; import org.apache.hadoop.cblock.util.MockStorageClient;
import org.junit.Test; import org.junit.Test;

View File

@ -25,16 +25,16 @@ import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics;
import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher;
import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock;
import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.junit.AfterClass; import org.junit.AfterClass;

View File

@ -29,7 +29,7 @@ import org.junit.Test;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Paths; import java.nio.file.Paths;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
/** /**
* Test the resource generation of Dynamic Provisioner. * Test the resource generation of Dynamic Provisioner.

View File

@ -19,7 +19,7 @@ package org.apache.hadoop.cblock.util;
import org.apache.hadoop.cblock.meta.ContainerDescriptor; import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException; import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;

View File

@ -18,12 +18,12 @@
package org.apache.hadoop.cblock.util; package org.apache.hadoop.cblock.util;
import org.apache.hadoop.cblock.meta.ContainerDescriptor; import org.apache.hadoop.cblock.meta.ContainerDescriptor;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -88,7 +88,7 @@ public class MockStorageClient implements ScmClient {
ContainerInfo container = new ContainerInfo.Builder() ContainerInfo container = new ContainerInfo.Builder()
.setContainerName(containerDescriptor.getContainerID()) .setContainerName(containerDescriptor.getContainerID())
.setPipeline(containerDescriptor.getPipeline()) .setPipeline(containerDescriptor.getPipeline())
.setState(HdslProtos.LifeCycleState.ALLOCATED) .setState(HddsProtos.LifeCycleState.ALLOCATED)
.build(); .build();
containerList.add(container); containerList.add(container);
return containerList; return containerList;
@ -134,8 +134,8 @@ public class MockStorageClient implements ScmClient {
} }
@Override @Override
public Pipeline createContainer(HdslProtos.ReplicationType type, public Pipeline createContainer(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor replicationFactor, String containerId, HddsProtos.ReplicationFactor replicationFactor, String containerId,
String owner) throws IOException { String owner) throws IOException {
int contId = currentContainerId.getAndIncrement(); int contId = currentContainerId.getAndIncrement();
ContainerLookUpService.addContainer(Long.toString(contId)); ContainerLookUpService.addContainer(Long.toString(contId));
@ -153,8 +153,8 @@ public class MockStorageClient implements ScmClient {
* @throws IOException * @throws IOException
*/ */
@Override @Override
public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HdslProtos.QueryScope queryScope, String poolName) nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException { throws IOException {
return null; return null;
} }
@ -168,8 +168,8 @@ public class MockStorageClient implements ScmClient {
* @throws IOException * @throws IOException
*/ */
@Override @Override
public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException { throws IOException {
return null; return null;
} }
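
The queryNode signature stubbed out above takes only renamed HddsProtos types. A hedged caller-side sketch is below; QueryNodeExample is an invented helper, and the specific constants NodeState.HEALTHY and QueryScope.CLUSTER are assumptions about the proto enums that this diff does not itself show.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;

final class QueryNodeExample {
  /** Asks an ScmClient for a pool of healthy nodes, using the post-rename types. */
  static HddsProtos.NodePool healthyNodes(ScmClient client, String poolName)
      throws IOException {
    // HEALTHY and CLUSTER are assumed enum values; only the parameter types come from this diff.
    return client.queryNode(EnumSet.of(HddsProtos.NodeState.HEALTHY),
        HddsProtos.QueryScope.CLUSTER, poolName);
  }
}
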

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.ToolRunner;

View File

@ -21,7 +21,7 @@ import org.apache.hadoop.cblock.cli.CBlockCli;
import org.apache.hadoop.cblock.meta.VolumeDescriptor; import org.apache.hadoop.cblock.meta.VolumeDescriptor;
import org.apache.hadoop.cblock.util.MockStorageClient; import org.apache.hadoop.cblock.util.MockStorageClient;
import org.apache.hadoop.conf.OzoneConfiguration; import org.apache.hadoop.conf.OzoneConfiguration;
import org.apache.hadoop.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After; import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;

View File

@ -596,8 +596,8 @@ function hadoop_bootstrap
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"} YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"} MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"} MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
HDSL_DIR=${HDSL_DIR:-"share/hadoop/hdsl"} HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
HDSL_LIB_JARS_DIR=${HDSL_LIB_JARS_DIR:-"share/hadoop/hdsl/lib"} HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"} OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"} OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
CBLOCK_DIR=${CBLOCK_DIR:-"share/hadoop/cblock"} CBLOCK_DIR=${CBLOCK_DIR:-"share/hadoop/cblock"}

View File

@ -219,7 +219,7 @@
</profile> </profile>
<profile> <profile>
<id>hdsl</id> <id>hdds</id>
<activation> <activation>
<activeByDefault>false</activeByDefault> <activeByDefault>false</activeByDefault>
</activation> </activation>
@ -231,11 +231,11 @@
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-server-scm</artifactId> <artifactId>hadoop-hdds-server-scm</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-tools</artifactId> <artifactId>hadoop-hdds-tools</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
@ -243,7 +243,7 @@
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-container-service</artifactId> <artifactId>hadoop-hdds-container-service</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
@ -251,7 +251,7 @@
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-tools</artifactId> <artifactId>hadoop-hdds-tools</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>

View File

@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HdslDatanodeService HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.quantile.enable=true

View File

@ -27,7 +27,7 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HdslDatanodeService HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

View File

@ -19,24 +19,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl</artifactId> <artifactId>hadoop-hdds</artifactId>
<version>3.2.0-SNAPSHOT</version> <version>3.2.0-SNAPSHOT</version>
</parent> </parent>
<artifactId>hadoop-hdsl-client</artifactId> <artifactId>hadoop-hdds-client</artifactId>
<version>3.2.0-SNAPSHOT</version> <version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop HDSL Client libraries</description> <description>Apache Hadoop Distributed Data Store Client libraries</description>
<name>Apache Hadoop HDSL Client</name> <name>Apache HDDS Client</name>
<packaging>jar</packaging> <packaging>jar</packaging>
<properties> <properties>
<hadoop.component>hdsl</hadoop.component> <hadoop.component>hdds</hadoop.component>
<is.hadoop.component>true</is.hadoop.component> <is.hadoop.component>true</is.hadoop.component>
</properties> </properties>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl-common</artifactId> <artifactId>hadoop-hdds-common</artifactId>
<scope>provided</scope> <scope>provided</scope>
</dependency> </dependency>

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
@ -28,19 +28,19 @@ import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler; import io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import java.io.IOException; import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.List;
import java.util.concurrent.Semaphore; import java.util.concurrent.Semaphore;
/** /**
@ -69,7 +69,7 @@ public class XceiverClient extends XceiverClientSpi {
this.pipeline = pipeline; this.pipeline = pipeline;
this.config = config; this.config = config;
this.semaphore = this.semaphore =
new Semaphore(OzoneClientUtils.getMaxOutstandingRequests(config)); new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
} }
@Override @Override
@ -186,7 +186,7 @@ public class XceiverClient extends XceiverClientSpi {
* @return - Stand Alone as the type. * @return - Stand Alone as the type.
*/ */
@Override @Override
public HdslProtos.ReplicationType getPipelineType() { public HddsProtos.ReplicationType getPipelineType() {
return HdslProtos.ReplicationType.STAND_ALONE; return HddsProtos.ReplicationType.STAND_ALONE;
} }
} }
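
XceiverClient now sizes its request semaphore through HddsClientUtils (the former OzoneClientUtils) instead of the ozone client package. A small sketch of reading that limit is below; it assumes only the getMaxOutstandingRequests call visible in the constructor above, and the class name OutstandingRequestsExample is invented.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;

final class OutstandingRequestsExample {
  public static void main(String[] args) {
    // Same lookup the XceiverClient constructor performs when creating its Semaphore.
    OzoneConfiguration conf = new OzoneConfiguration();
    int maxOutstanding = HddsClientUtils.getMaxOutstandingRequests(conf);
    System.out.println("max outstanding requests: " + maxOutstanding);
  }
}
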

View File

@ -15,19 +15,17 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import io.netty.channel.Channel; import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.SimpleChannelInboundHandler;
import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto; .ContainerCommandResponseProto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline; import io.netty.channel.ChannelPipeline;
@ -24,8 +24,8 @@ import io.netty.handler.codec.protobuf.ProtobufDecoder;
import io.netty.handler.codec.protobuf.ProtobufEncoder; import io.netty.handler.codec.protobuf.ProtobufEncoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder; import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender; import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import java.util.concurrent.Semaphore; import java.util.concurrent.Semaphore;

View File

@ -16,33 +16,32 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.Callable;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.cache.Cache; import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification; import com.google.common.cache.RemovalNotification;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import static org.apache.hadoop.scm.ScmConfigKeys import java.io.Closeable;
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT; import java.io.IOException;
import static org.apache.hadoop.scm.ScmConfigKeys import java.util.concurrent.Callable;
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY; import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys
import static org.apache.hadoop.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT; .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT;
import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys
.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
.ReplicationType.RATIS; .ReplicationType.RATIS;
/** /**
@ -186,24 +185,24 @@ public class XceiverClientManager implements Closeable {
* Returns hard coded 3 as replication factor. * Returns hard coded 3 as replication factor.
* @return 3 * @return 3
*/ */
public HdslProtos.ReplicationFactor getFactor() { public HddsProtos.ReplicationFactor getFactor() {
if(isUseRatis()) { if(isUseRatis()) {
return HdslProtos.ReplicationFactor.THREE; return HddsProtos.ReplicationFactor.THREE;
} }
return HdslProtos.ReplicationFactor.ONE; return HddsProtos.ReplicationFactor.ONE;
} }
/** /**
* Returns the default replication type. * Returns the default replication type.
* @return Ratis or Standalone * @return Ratis or Standalone
*/ */
public HdslProtos.ReplicationType getType() { public HddsProtos.ReplicationType getType() {
// TODO : Fix me and make Ratis default before release. // TODO : Fix me and make Ratis default before release.
// TODO: Remove this as replication factor and type are pipeline properties // TODO: Remove this as replication factor and type are pipeline properties
if(isUseRatis()) { if(isUseRatis()) {
return HdslProtos.ReplicationType.RATIS; return HddsProtos.ReplicationType.RATIS;
} }
return HdslProtos.ReplicationType.STAND_ALONE; return HddsProtos.ReplicationType.STAND_ALONE;
} }
/** /**
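
The two getters above now return HddsProtos enums instead of HdslProtos ones. A tiny, self-contained illustration of the renamed constants follows; ReplicationDefaultsExample is a made-up class name, and the values printed are simply the constants referenced in this hunk.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

final class ReplicationDefaultsExample {
  public static void main(String[] args) {
    // Per getFactor()/getType() above: Ratis pipelines use three replicas, standalone uses one.
    HddsProtos.ReplicationType ratis = HddsProtos.ReplicationType.RATIS;
    HddsProtos.ReplicationFactor three = HddsProtos.ReplicationFactor.THREE;
    HddsProtos.ReplicationType standalone = HddsProtos.ReplicationType.STAND_ALONE;
    HddsProtos.ReplicationFactor one = HddsProtos.ReplicationFactor.ONE;
    System.out.println(ratis + "/" + three + ", " + standalone + "/" + one);
  }
}
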

View File

@ -15,10 +15,10 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.annotation.Metrics;

View File

@ -16,17 +16,19 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
import org.apache.hadoop.ozone.client.OzoneClientUtils; .ContainerCommandRequestProto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ratis.RatisHelper; import org.apache.ratis.RatisHelper;
import org.apache.ratis.client.RaftClient; import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.protocol.RaftClientReply;
@ -60,7 +62,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
final int maxOutstandingRequests = final int maxOutstandingRequests =
OzoneClientUtils.getMaxOutstandingRequests(ozoneConf); HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
return new XceiverClientRatis(pipeline, return new XceiverClientRatis(pipeline,
SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests); SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests);
} }
@ -98,8 +100,8 @@ public final class XceiverClientRatis extends XceiverClientSpi {
* @return - Ratis * @return - Ratis
*/ */
@Override @Override
public HdslProtos.ReplicationType getPipelineType() { public HddsProtos.ReplicationType getPipelineType() {
return HdslProtos.ReplicationType.RATIS; return HddsProtos.ReplicationType.RATIS;
} }
private void reinitialize(List<DatanodeDetails> datanodes, RaftGroup group) private void reinitialize(List<DatanodeDetails> datanodes, RaftGroup group)

View File

@ -15,20 +15,22 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.client; package org.apache.hadoop.hdds.scm.client;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadContainerResponseProto; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.protocolPB
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB; .StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -37,8 +39,10 @@ import java.util.EnumSet;
import java.util.List; import java.util.List;
import java.util.UUID; import java.util.UUID;
import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.ALLOCATED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN; .ALLOCATED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
.OPEN;
/** /**
* This class provides the client-facing APIs of container operations. * This class provides the client-facing APIs of container operations.
@ -189,8 +193,8 @@ public class ContainerOperationClient implements ScmClient {
* @inheritDoc * @inheritDoc
*/ */
@Override @Override
public Pipeline createContainer(HdslProtos.ReplicationType type, public Pipeline createContainer(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HddsProtos.ReplicationFactor factor,
String containerId, String owner) throws IOException { String containerId, String owner) throws IOException {
XceiverClientSpi client = null; XceiverClientSpi client = null;
try { try {
@ -229,8 +233,8 @@ public class ContainerOperationClient implements ScmClient {
* @throws IOException * @throws IOException
*/ */
@Override @Override
public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HdslProtos.QueryScope queryScope, String poolName) nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException { throws IOException {
return storageContainerLocationClient.queryNode(nodeStatuses, queryScope, return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
poolName); poolName);
@ -240,8 +244,8 @@ public class ContainerOperationClient implements ScmClient {
* Creates a specified replication pipeline. * Creates a specified replication pipeline.
*/ */
@Override @Override
public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException { throws IOException {
return storageContainerLocationClient.createReplicationPipeline(type, return storageContainerLocationClient.createReplicationPipeline(type,
factor, nodePool); factor, nodePool);
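
For orientation, a minimal caller sketch against the renamed client API (not part of this patch): the createContainer signature matches the HddsProtos variant above, while the wired-up ScmClient instance, the STAND_ALONE/ONE enum constants and the CreateContainerSketch/createDemoContainer names are assumptions made for illustration.

// Illustrative only: a caller of the renamed ScmClient.createContainer.
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public final class CreateContainerSketch {
  private CreateContainerSketch() {
  }

  /** Allocates a single stand-alone container and returns its pipeline. */
  public static Pipeline createDemoContainer(ScmClient scmClient, String containerName)
      throws IOException {
    // Assumed enum constants; the patch only shows the HddsProtos type names.
    return scmClient.createContainer(
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        containerName,
        "demo-owner");
  }
}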

View File

@ -16,7 +16,20 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.ozone.client; package org.apache.hadoop.hdds.scm.client;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.ParseException; import java.text.ParseException;
import java.time.Instant; import java.time.Instant;
@ -25,21 +38,6 @@ import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatter;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.ScmConfigKeys;
import com.google.common.base.Preconditions;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Utility methods for Ozone and Container Clients. * Utility methods for Ozone and Container Clients.
* *
@ -49,14 +47,14 @@ import org.slf4j.LoggerFactory;
*/ */
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Unstable @InterfaceStability.Unstable
public final class OzoneClientUtils { public final class HddsClientUtils {
private static final Logger LOG = LoggerFactory.getLogger( private static final Logger LOG = LoggerFactory.getLogger(
OzoneClientUtils.class); HddsClientUtils.class);
private static final int NO_PORT = -1; private static final int NO_PORT = -1;
private OzoneClientUtils() { private HddsClientUtils() {
} }
/** /**
@ -69,55 +67,28 @@ public final class OzoneClientUtils {
return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE)); return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
}); });
/** /**
* Returns the cache value to be used for list calls. * Convert time in millisecond to a human readable format required in ozone.
* @param conf Configuration object * @return a human readable string for the input time
* @return list cache size
*/ */
public static int getListCacheSize(Configuration conf) { public static String formatDateTime(long millis) {
return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE, ZonedDateTime dateTime = ZonedDateTime.ofInstant(
OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT); Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone());
return DATE_FORMAT.get().format(dateTime);
} }
/** /**
* @return a default instance of {@link CloseableHttpClient}. * Convert time in ozone date format to millisecond.
* @return time in milliseconds
*/ */
public static CloseableHttpClient newHttpClient() { public static long formatDateTime(String date) throws ParseException {
return OzoneClientUtils.newHttpClient(new OzoneConfiguration()); Preconditions.checkNotNull(date, "Date string should not be null.");
return ZonedDateTime.parse(date, DATE_FORMAT.get())
.toInstant().getEpochSecond();
} }
/**
* Returns a {@link CloseableHttpClient} configured by given configuration.
* If conf is null, returns a default instance.
*
* @param conf configuration
* @return a {@link CloseableHttpClient} instance.
*/
public static CloseableHttpClient newHttpClient(Configuration conf) {
long socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
long connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
if (conf != null) {
socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
}
CloseableHttpClient client = HttpClients.custom()
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
return client;
}
/** /**
* verifies that bucket name / volume name is a valid DNS name. * verifies that bucket name / volume name is a valid DNS name.
@ -199,23 +170,53 @@ public final class OzoneClientUtils {
} }
/** /**
* Convert time in millisecond to a human readable format required in ozone. * Returns the cache value to be used for list calls.
* @return a human readable string for the input time * @param conf Configuration object
* @return list cache size
*/ */
public static String formatDateTime(long millis) { public static int getListCacheSize(Configuration conf) {
ZonedDateTime dateTime = ZonedDateTime.ofInstant( return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone()); OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
return DATE_FORMAT.get().format(dateTime);
} }
/** /**
* Convert time in ozone date format to millisecond. * @return a default instance of {@link CloseableHttpClient}.
* @return time in milliseconds
*/ */
public static long formatDateTime(String date) throws ParseException { public static CloseableHttpClient newHttpClient() {
Preconditions.checkNotNull(date, "Date string should not be null."); return HddsClientUtils.newHttpClient(new Configuration());
return ZonedDateTime.parse(date, DATE_FORMAT.get()) }
.toInstant().getEpochSecond();
/**
* Returns a {@link CloseableHttpClient} configured by given configuration.
* If conf is null, returns a default instance.
*
* @param conf configuration
* @return a {@link CloseableHttpClient} instance.
*/
public static CloseableHttpClient newHttpClient(Configuration conf) {
long socketTimeout = OzoneConfigKeys
.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
long connectionTimeout = OzoneConfigKeys
.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
if (conf != null) {
socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
connectionTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
}
CloseableHttpClient client = HttpClients.custom()
.setDefaultRequestConfig(
RequestConfig.custom()
.setSocketTimeout(Math.toIntExact(socketTimeout))
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
return client;
} }
/** /**
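
A hedged usage sketch for the renamed HddsClientUtils helpers above: the formatDateTime round trip and the newHttpClient overload come from this patch, while the wrapper class, the sample epoch value and the OzoneConfiguration instance are illustrative.

// Illustrative only: exercising the renamed HddsClientUtils helpers.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.http.impl.client.CloseableHttpClient;

public final class HddsClientUtilsSketch {
  private HddsClientUtilsSketch() {
  }

  public static void main(String[] args) throws Exception {
    // Ozone-style date formatting round trip.
    String formatted = HddsClientUtils.formatDateTime(0L);
    long parsed = HddsClientUtils.formatDateTime(formatted);
    System.out.println(formatted + " -> " + parsed);

    // HTTP client configured from an Ozone configuration (defaults apply when conf is null).
    try (CloseableHttpClient client =
        HddsClientUtils.newHttpClient(new OzoneConfiguration())) {
      System.out.println("client ready: " + client);
    }
  }
}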

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.client; package org.apache.hadoop.hdds.scm.client;
/** /**
* Client facing classes for the container operations. * Client facing classes for the container operations.

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
/** /**
* Classes for different type of container service client. * Classes for different type of container service client.

View File

@ -16,7 +16,15 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.storage; package org.apache.hadoop.hdds.scm.storage;
import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadChunkResponseProto;
import java.io.EOFException; import java.io.EOFException;
import java.io.IOException; import java.io.IOException;
@ -25,14 +33,6 @@ import java.nio.ByteBuffer;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadChunkResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.XceiverClientManager;
/** /**
* An {@link InputStream} used by the REST service in combination with the * An {@link InputStream} used by the REST service in combination with the
* SCMClient to read the value of a key from a sequence * SCMClient to read the value of a key from a sequence

View File

@ -16,24 +16,24 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.storage; package org.apache.hadoop.hdds.scm.storage;
import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.putKey; import com.google.protobuf.ByteString;
import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.writeChunk; import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
import java.io.IOException; import java.io.IOException;
import java.io.OutputStream; import java.io.OutputStream;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.UUID; import java.util.UUID;
import com.google.protobuf.ByteString; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putKey;
import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
import org.apache.commons.codec.digest.DigestUtils; .writeChunk;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
/** /**
* An {@link OutputStream} used by the REST service in combination with the * An {@link OutputStream} used by the REST service in combination with the

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.storage; package org.apache.hadoop.hdds.scm.storage;
/** /**
* Low level IO streams to upload/download chunks from container service. * Low level IO streams to upload/download chunks from container service.

View File

@ -16,6 +16,6 @@
--> -->
<FindBugsFilter> <FindBugsFilter>
<Match> <Match>
<Package name="org.apache.hadoop.hdsl.protocol.proto"/> <Package name="org.apache.hadoop.hdds.protocol.proto"/>
</Match> </Match>
</FindBugsFilter> </FindBugsFilter>

View File

@ -19,22 +19,21 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<parent> <parent>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdsl</artifactId> <artifactId>hadoop-hdds</artifactId>
<version>3.2.0-SNAPSHOT</version> <version>3.2.0-SNAPSHOT</version>
</parent> </parent>
<artifactId>hadoop-hdsl-common</artifactId> <artifactId>hadoop-hdds-common</artifactId>
<version>3.2.0-SNAPSHOT</version> <version>3.2.0-SNAPSHOT</version>
<description>Apache Hadoop HDSL Common utilities</description> <description>Apache Hadoop Distributed Data Store Common</description>
<name>Apache Hadoop HDSL Common</name> <name>Apache HDDS Common</name>
<packaging>jar</packaging> <packaging>jar</packaging>
<properties> <properties>
<hadoop.component>hdsl</hadoop.component> <hadoop.component>hdds</hadoop.component>
<is.hadoop.component>true</is.hadoop.component> <is.hadoop.component>true</is.hadoop.component>
</properties> </properties>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.fusesource.leveldbjni</groupId> <groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId> <artifactId>leveldbjni-all</artifactId>
@ -109,7 +108,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<includes> <includes>
<include>StorageContainerLocationProtocol.proto</include> <include>StorageContainerLocationProtocol.proto</include>
<include>DatanodeContainerProtocol.proto</include> <include>DatanodeContainerProtocol.proto</include>
<include>hdsl.proto</include> <include>hdds.proto</include>
<include>ScmBlockLocationProtocol.proto</include> <include>ScmBlockLocationProtocol.proto</include>
</includes> </includes>
</source> </source>

View File

@ -0,0 +1,6 @@
package org.apache.hadoop.hdds;
public class HddsConfigKeys {
private HddsConfigKeys() {
}
}

View File

@ -16,35 +16,34 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdsl; package org.apache.hadoop.hdds;
import java.net.InetSocketAddress;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.scm.ScmConfigKeys;
import com.google.common.base.Optional; import com.google.common.base.Optional;
import com.google.common.base.Strings; import com.google.common.base.Strings;
import com.google.common.net.HostAndPort; import com.google.common.net.HostAndPort;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashSet;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
/** /**
* HDSL specific stateless utility functions. * HDDS specific stateless utility functions.
*/ */
public class HdslUtils { public class HddsUtils {
private static final Logger LOG = LoggerFactory.getLogger(HdslUtils.class); private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
/** /**
* The service ID of the solitary Ozone SCM service. * The service ID of the solitary Ozone SCM service.
@ -55,7 +54,7 @@ public class HdslUtils {
private static final int NO_PORT = -1; private static final int NO_PORT = -1;
private HdslUtils() { private HddsUtils() {
} }
/** /**
@ -233,7 +232,7 @@ public class HdslUtils {
return addresses; return addresses;
} }
public static boolean isHdslEnabled(Configuration conf) { public static boolean isHddsEnabled(Configuration conf) {
String securityEnabled = String securityEnabled =
conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple"); "simple");

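A small sketch of the renamed HddsUtils entry point: isHddsEnabled is taken from this patch, while the wrapper class and the OzoneConfiguration instance are assumed for illustration.

// Illustrative only: checking whether HDDS services should be started.
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class HddsEnabledCheck {
  private HddsEnabledCheck() {
  }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    if (HddsUtils.isHddsEnabled(conf)) {
      System.out.println("ozone.enabled is set; HDDS services may be started");
    } else {
      System.out.println("HDDS is disabled in this configuration");
    }
  }
}
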
View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.ozone.client; package org.apache.hadoop.hdds.client;
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts;

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.ozone.client; package org.apache.hadoop.hdds.client;
/** /**
* The replication factor to be used while writing key into ozone. * The replication factor to be used while writing key into ozone.

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.ozone.client; package org.apache.hadoop.hdds.client;
/** /**
* The replication type to be used while writing key into ozone. * The replication type to be used while writing key into ozone.

View File

@ -16,8 +16,8 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.ozone.client; package org.apache.hadoop.hdds.client;
/** /**
* Base property types for HDSL containers and replications. * Base property types for HDDS containers and replications.
*/ */

View File

@ -16,11 +16,11 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdsl.conf; package org.apache.hadoop.hdds.conf;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException; import javax.xml.bind.JAXBException;
import javax.xml.bind.Unmarshaller; import javax.xml.bind.Unmarshaller;
@ -28,8 +28,9 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience; import java.net.URL;
import org.apache.hadoop.conf.Configuration; import java.util.ArrayList;
import java.util.List;
/** /**
* Configuration for ozone. * Configuration for ozone.

View File

@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdsl.conf; package org.apache.hadoop.hdds.conf;

View File

@ -16,8 +16,8 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdsl; package org.apache.hadoop.hdds;
/** /**
* Generic HDSL specific configurator and helper classes. * Generic HDDS specific configurator and helper classes.
*/ */

View File

@ -16,12 +16,12 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdsl.protocol; package org.apache.hadoop.hdds.protocol;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.util.UUID; import java.util.UUID;
@ -222,7 +222,7 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
* @return DatanodeDetails * @return DatanodeDetails
*/ */
public static DatanodeDetails getFromProtoBuf( public static DatanodeDetails getFromProtoBuf(
HdslProtos.DatanodeDetailsProto datanodeDetailsProto) { HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
DatanodeDetails.Builder builder = newBuilder(); DatanodeDetails.Builder builder = newBuilder();
builder.setUuid(datanodeDetailsProto.getUuid()); builder.setUuid(datanodeDetailsProto.getUuid());
if (datanodeDetailsProto.hasIpAddress()) { if (datanodeDetailsProto.hasIpAddress()) {
@ -251,11 +251,11 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
/** /**
* Returns a DatanodeDetails protobuf message from a datanode ID. * Returns a DatanodeDetails protobuf message from a datanode ID.
* @return Hdsl.DatanodeDetailsProto * @return HddsProtos.DatanodeDetailsProto
*/ */
public HdslProtos.DatanodeDetailsProto getProtoBufMessage() { public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
HdslProtos.DatanodeDetailsProto.Builder builder = HddsProtos.DatanodeDetailsProto.Builder builder =
HdslProtos.DatanodeDetailsProto.newBuilder() HddsProtos.DatanodeDetailsProto.newBuilder()
.setUuid(getUuidString()); .setUuid(getUuidString());
if (ipAddress != null) { if (ipAddress != null) {
builder.setIpAddress(ipAddress); builder.setIpAddress(ipAddress);
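
A round-trip sketch for the renamed DatanodeDetails protobuf conversion above: getProtoBufMessage, getFromProtoBuf and getUuidString appear in the patch, while the helper class and the equality check are hypothetical.

// Illustrative only: DatanodeDetails <-> HddsProtos.DatanodeDetailsProto round trip.
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class DatanodeDetailsRoundTrip {
  private DatanodeDetailsRoundTrip() {
  }

  /** Serializes a datanode to its HddsProtos form and rebuilds it. */
  public static DatanodeDetails roundTrip(DatanodeDetails original) {
    HddsProtos.DatanodeDetailsProto proto = original.getProtoBufMessage();
    DatanodeDetails copy = DatanodeDetails.getFromProtoBuf(proto);
    // The UUID must survive the round trip; other fields are optional in the proto.
    if (!copy.getUuidString().equals(original.getUuidString())) {
      throw new IllegalStateException("UUID changed during protobuf round trip");
    }
    return copy;
  }
}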

View File

@ -17,6 +17,6 @@
*/ */
/** /**
* This package contains HDSL protocol related classes. * This package contains HDDS protocol related classes.
*/ */
package org.apache.hadoop.hdsl.protocol; package org.apache.hadoop.hdds.protocol;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
@ -121,18 +121,18 @@ public final class ScmConfigKeys {
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876; public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877; public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
public static final String HDSL_REST_HTTP_ADDRESS_KEY = public static final String HDDS_REST_HTTP_ADDRESS_KEY =
"hdsl.rest.http-address"; "hdds.rest.http-address";
public static final String HDSL_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880"; public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
public static final String HDSL_REST_CSRF_ENABLED_KEY = public static final String HDDS_REST_CSRF_ENABLED_KEY =
"hdsl.rest.rest-csrf.enabled"; "hdds.rest.rest-csrf.enabled";
public static final boolean HDSL_REST_CSRF_ENABLED_DEFAULT = false; public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
public static final String HDSL_REST_NETTY_HIGH_WATERMARK = public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
"hdsl.rest.netty.high.watermark"; "hdds.rest.netty.high.watermark";
public static final int HDSL_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536; public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
public static final int HDSL_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768; public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
public static final String HDSL_REST_NETTY_LOW_WATERMARK = public static final String HDDS_REST_NETTY_LOW_WATERMARK =
"hdsl.rest.netty.low.watermark"; "hdds.rest.netty.low.watermark";
public static final String OZONE_SCM_HANDLER_COUNT_KEY = public static final String OZONE_SCM_HANDLER_COUNT_KEY =
"ozone.scm.handler.count.key"; "ozone.scm.handler.count.key";

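A hedged sketch of reading the renamed hdds.rest.* keys: the ScmConfigKeys constants are from this patch, while the wrapper class and the OzoneConfiguration instance are illustrative.

// Illustrative only: reading the renamed REST configuration keys.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class HddsRestConfigSketch {
  private HddsRestConfigSketch() {
  }

  public static void main(String[] args) {
    Configuration conf = new OzoneConfiguration();
    String restAddress = conf.get(
        ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY,
        ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_DEFAULT);
    boolean csrfEnabled = conf.getBoolean(
        ScmConfigKeys.HDDS_REST_CSRF_ENABLED_KEY,
        ScmConfigKeys.HDDS_REST_CSRF_ENABLED_DEFAULT);
    System.out.println("REST endpoint: " + restAddress + ", CSRF filter: " + csrfEnabled);
  }
}
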
View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
/** /**
* ScmInfo wraps the result returned from SCM#getScmInfo which * ScmInfo wraps the result returned from SCM#getScmInfo which

View File

@ -16,16 +16,16 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandRequestProto; .ContainerCommandRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto; .ContainerCommandResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
@ -125,5 +125,5 @@ public abstract class XceiverClientSpi implements Closeable {
* *
* @return - {Stand_Alone, Ratis or Chained} * @return - {Stand_Alone, Ratis or Chained}
*/ */
public abstract HdslProtos.ReplicationType getPipelineType(); public abstract HddsProtos.ReplicationType getPipelineType();
} }

View File

@ -15,13 +15,13 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.client; package org.apache.hadoop.hdds.scm.client;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException; import java.io.IOException;
import java.util.EnumSet; import java.util.EnumSet;
@ -111,8 +111,8 @@ public interface ScmClient {
* @return Pipeline * @return Pipeline
* @throws IOException - in case of error. * @throws IOException - in case of error.
*/ */
Pipeline createContainer(HdslProtos.ReplicationType type, Pipeline createContainer(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor replicationFactor, String containerId, HddsProtos.ReplicationFactor replicationFactor, String containerId,
String owner) throws IOException; String owner) throws IOException;
/** /**
@ -123,8 +123,8 @@ public interface ScmClient {
* @return A set of nodes that meet the requested criteria. * @return A set of nodes that meet the requested criteria.
* @throws IOException * @throws IOException
*/ */
HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses, HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
HdslProtos.QueryScope queryScope, String poolName) throws IOException; HddsProtos.QueryScope queryScope, String poolName) throws IOException;
/** /**
* Creates a specified replication pipeline. * Creates a specified replication pipeline.
@ -133,7 +133,7 @@ public interface ScmClient {
* @param nodePool - Set of machines. * @param nodePool - Set of machines.
* @throws IOException * @throws IOException
*/ */
Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException; throws IOException;
} }
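
A hedged sketch against the renamed queryNode signature above: the HEALTHY node state, CLUSTER query scope and empty pool name are assumptions (the client-side translator later in this patch notes that only cluster-wide queries are supported), and the wrapper class is hypothetical.

// Illustrative only: asking SCM for all nodes currently reported as healthy.
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;

public final class QueryHealthyNodes {
  private QueryHealthyNodes() {
  }

  public static HddsProtos.NodePool healthyNodes(ScmClient scmClient) throws IOException {
    // Assumed enum constants; pool name is ignored for cluster-wide queries.
    return scmClient.queryNode(
        EnumSet.of(HddsProtos.NodeState.HEALTHY),
        HddsProtos.QueryScope.CLUSTER,
        "");
  }
}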

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.client; package org.apache.hadoop.hdds.scm.client;
/** /**
* This package contains classes for the client of the storage container * This package contains classes for the client of the storage container

View File

@ -16,7 +16,7 @@
* *
*/ */
package org.apache.hadoop.ozone.scm.container.ContainerStates; package org.apache.hadoop.hdds.scm.container;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.commons.math3.util.MathUtils; import org.apache.commons.math3.util.MathUtils;

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
/** /**
* Allocated block wraps the result returned from SCM#allocateBlock which * Allocated block wraps the result returned from SCM#allocateBlock which

View File

@ -16,13 +16,13 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import java.util.Comparator; import java.util.Comparator;
@ -32,7 +32,7 @@ import java.util.Comparator;
*/ */
public class ContainerInfo public class ContainerInfo
implements Comparator<ContainerInfo>, Comparable<ContainerInfo> { implements Comparator<ContainerInfo>, Comparable<ContainerInfo> {
private HdslProtos.LifeCycleState state; private HddsProtos.LifeCycleState state;
private Pipeline pipeline; private Pipeline pipeline;
// Bytes allocated by SCM for clients. // Bytes allocated by SCM for clients.
private long allocatedBytes; private long allocatedBytes;
@ -48,7 +48,7 @@ public class ContainerInfo
ContainerInfo( ContainerInfo(
long containerID, long containerID,
final String containerName, final String containerName,
HdslProtos.LifeCycleState state, HddsProtos.LifeCycleState state,
Pipeline pipeline, Pipeline pipeline,
long allocatedBytes, long allocatedBytes,
long usedBytes, long usedBytes,
@ -73,7 +73,7 @@ public class ContainerInfo
public ContainerInfo() { public ContainerInfo() {
} }
public static ContainerInfo fromProtobuf(HdslProtos.SCMContainerInfo info) { public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
ContainerInfo.Builder builder = new ContainerInfo.Builder(); ContainerInfo.Builder builder = new ContainerInfo.Builder();
builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline())); builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline()));
builder.setAllocatedBytes(info.getAllocatedBytes()); builder.setAllocatedBytes(info.getAllocatedBytes());
@ -95,11 +95,11 @@ public class ContainerInfo
return containerName; return containerName;
} }
public HdslProtos.LifeCycleState getState() { public HddsProtos.LifeCycleState getState() {
return state; return state;
} }
public void setState(HdslProtos.LifeCycleState state) { public void setState(HddsProtos.LifeCycleState state) {
this.state = state; this.state = state;
} }
@ -156,9 +156,9 @@ public class ContainerInfo
allocatedBytes += size; allocatedBytes += size;
} }
public HdslProtos.SCMContainerInfo getProtobuf() { public HddsProtos.SCMContainerInfo getProtobuf() {
HdslProtos.SCMContainerInfo.Builder builder = HddsProtos.SCMContainerInfo.Builder builder =
HdslProtos.SCMContainerInfo.newBuilder(); HddsProtos.SCMContainerInfo.newBuilder();
builder.setPipeline(getPipeline().getProtobufMessage()); builder.setPipeline(getPipeline().getProtobufMessage());
builder.setAllocatedBytes(getAllocatedBytes()); builder.setAllocatedBytes(getAllocatedBytes());
builder.setUsedBytes(getUsedBytes()); builder.setUsedBytes(getUsedBytes());
@ -268,7 +268,7 @@ public class ContainerInfo
* Builder class for ContainerInfo. * Builder class for ContainerInfo.
*/ */
public static class Builder { public static class Builder {
private HdslProtos.LifeCycleState state; private HddsProtos.LifeCycleState state;
private Pipeline pipeline; private Pipeline pipeline;
private long allocated; private long allocated;
private long used; private long used;
@ -284,7 +284,7 @@ public class ContainerInfo
return this; return this;
} }
public Builder setState(HdslProtos.LifeCycleState lifeCycleState) { public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
this.state = lifeCycleState; this.state = lifeCycleState;
return this; return this;
} }
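
A round-trip sketch for ContainerInfo over the renamed SCMContainerInfo message: fromProtobuf, getProtobuf, getState and getAllocatedBytes appear in this patch, while the helper method itself is illustrative and assumes the proto came from SCM.

// Illustrative only: ContainerInfo <-> HddsProtos.SCMContainerInfo round trip.
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

public final class ContainerInfoRoundTrip {
  private ContainerInfoRoundTrip() {
  }

  public static HddsProtos.SCMContainerInfo roundTrip(HddsProtos.SCMContainerInfo proto) {
    ContainerInfo info = ContainerInfo.fromProtobuf(proto);
    // State and usage counters travel through the protobuf form unchanged.
    System.out.println("container state: " + info.getState()
        + ", allocated bytes: " + info.getAllocatedBytes());
    return info.getProtobuf();
  }
}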

View File

@ -15,10 +15,9 @@
* the License. * the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
import static org.apache.hadoop.hdds.protocol.proto
import static org.apache.hadoop.hdsl.protocol.proto
.ScmBlockLocationProtocolProtos.DeleteScmBlockResult; .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
/** /**

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonAutoDetect; import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter; import com.fasterxml.jackson.annotation.JsonFilter;
@ -29,8 +29,8 @@ import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -83,14 +83,14 @@ public class Pipeline {
* @param pipeline - ProtoBuf definition for the pipeline. * @param pipeline - ProtoBuf definition for the pipeline.
* @return Pipeline Object * @return Pipeline Object
*/ */
public static Pipeline getFromProtoBuf(HdslProtos.Pipeline pipeline) { public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
Preconditions.checkNotNull(pipeline); Preconditions.checkNotNull(pipeline);
PipelineChannel pipelineChannel = PipelineChannel pipelineChannel =
PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel()); PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
return new Pipeline(pipeline.getContainerName(), pipelineChannel); return new Pipeline(pipeline.getContainerName(), pipelineChannel);
} }
public HdslProtos.ReplicationFactor getFactor() { public HddsProtos.ReplicationFactor getFactor() {
return pipelineChannel.getFactor(); return pipelineChannel.getFactor();
} }
@ -143,9 +143,9 @@ public class Pipeline {
* @return Protobuf message * @return Protobuf message
*/ */
@JsonIgnore @JsonIgnore
public HdslProtos.Pipeline getProtobufMessage() { public HddsProtos.Pipeline getProtobufMessage() {
HdslProtos.Pipeline.Builder builder = HddsProtos.Pipeline.Builder builder =
HdslProtos.Pipeline.newBuilder(); HddsProtos.Pipeline.newBuilder();
builder.setContainerName(this.containerName); builder.setContainerName(this.containerName);
builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage()); builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
return builder.build(); return builder.build();
@ -194,7 +194,7 @@ public class Pipeline {
* *
* @return - LifeCycleStates. * @return - LifeCycleStates.
*/ */
public HdslProtos.LifeCycleState getLifeCycleState() { public HddsProtos.LifeCycleState getLifeCycleState() {
return pipelineChannel.getLifeCycleState(); return pipelineChannel.getLifeCycleState();
} }
@ -212,7 +212,7 @@ public class Pipeline {
* *
* @return type - Standalone, Ratis, Chained. * @return type - Standalone, Ratis, Chained.
*/ */
public HdslProtos.ReplicationType getType() { public HddsProtos.ReplicationType getType() {
return pipelineChannel.getType(); return pipelineChannel.getType();
} }
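
A round-trip sketch for the renamed Pipeline conversion above: every method used here appears in this patch, and only the wrapper class is illustrative.

// Illustrative only: Pipeline <-> HddsProtos.Pipeline round trip.
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public final class PipelineRoundTrip {
  private PipelineRoundTrip() {
  }

  public static Pipeline describe(HddsProtos.Pipeline proto) {
    Pipeline pipeline = Pipeline.getFromProtoBuf(proto);
    System.out.println("type=" + pipeline.getType()
        + " factor=" + pipeline.getFactor()
        + " state=" + pipeline.getLifeCycleState());
    // Converting back yields an equivalent protobuf message.
    HddsProtos.Pipeline rebuilt = pipeline.getProtobufMessage();
    return Pipeline.getFromProtoBuf(rebuilt);
  }
}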

View File

@ -15,15 +15,15 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.hadoop.hdsl.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import java.util.Map; import java.util.Map;
import java.util.TreeMap; import java.util.TreeMap;
@ -82,9 +82,9 @@ public class PipelineChannel {
} }
@JsonIgnore @JsonIgnore
public HdslProtos.PipelineChannel getProtobufMessage() { public HddsProtos.PipelineChannel getProtobufMessage() {
HdslProtos.PipelineChannel.Builder builder = HddsProtos.PipelineChannel.Builder builder =
HdslProtos.PipelineChannel.newBuilder(); HddsProtos.PipelineChannel.newBuilder();
for (DatanodeDetails datanode : datanodes.values()) { for (DatanodeDetails datanode : datanodes.values()) {
builder.addMembers(datanode.getProtoBufMessage()); builder.addMembers(datanode.getProtoBufMessage());
} }
@ -104,7 +104,7 @@ public class PipelineChannel {
} }
public static PipelineChannel getFromProtoBuf( public static PipelineChannel getFromProtoBuf(
HdslProtos.PipelineChannel transportProtos) { HddsProtos.PipelineChannel transportProtos) {
Preconditions.checkNotNull(transportProtos); Preconditions.checkNotNull(transportProtos);
PipelineChannel pipelineChannel = PipelineChannel pipelineChannel =
new PipelineChannel(transportProtos.getLeaderID(), new PipelineChannel(transportProtos.getLeaderID(),
@ -113,7 +113,7 @@ public class PipelineChannel {
transportProtos.getFactor(), transportProtos.getFactor(),
transportProtos.getName()); transportProtos.getName());
for (HdslProtos.DatanodeDetailsProto dataID : for (HddsProtos.DatanodeDetailsProto dataID :
transportProtos.getMembersList()) { transportProtos.getMembersList()) {
pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID)); pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID));
} }

View File

@ -15,9 +15,9 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import java.io.IOException; import java.io.IOException;

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.container.common.helpers; package org.apache.hadoop.hdds.scm.container.common.helpers;
/** /**
Contains protocol buffer helper classes and utilities used in Contains protocol buffer helper classes and utilities used in
impl. impl.

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm; package org.apache.hadoop.hdds.scm;
/** /**
* This package contains classes for the client of the storage container * This package contains classes for the client of the storage container

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.protocol; package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

View File

@ -15,19 +15,19 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.protocol; package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
import org.apache.hadoop.scm.ScmInfo;
/** /**
* ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
* to read/write a block. * to read/write a block.

View File

@ -16,14 +16,14 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.protocol; package org.apache.hadoop.hdds.scm.protocol;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import java.util.List;
import java.util.stream.Collectors;
/** /**
* Holds the nodes that currently host the block for a block key. * Holds the nodes that currently host the block for a block key.
*/ */

View File

@ -15,18 +15,19 @@
* the License. * the License.
*/ */
package org.apache.hadoop.scm.protocol; package org.apache.hadoop.hdds.scm.protocol;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import java.io.IOException; import java.io.IOException;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.List; import java.util.List;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.scm.ScmInfo;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
/** /**
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
* that currently host a container. * that currently host a container.
@ -37,8 +38,8 @@ public interface StorageContainerLocationProtocol {
* set of datanodes that should be used creating this container. * set of datanodes that should be used creating this container.
* *
*/ */
Pipeline allocateContainer(HdslProtos.ReplicationType replicationType, Pipeline allocateContainer(HddsProtos.ReplicationType replicationType,
HdslProtos.ReplicationFactor factor, String containerName, String owner) HddsProtos.ReplicationFactor factor, String containerName, String owner)
throws IOException; throws IOException;
/** /**
@ -85,8 +86,8 @@ public interface StorageContainerLocationProtocol {
* @param nodeStatuses * @param nodeStatuses
* @return List of Datanodes. * @return List of Datanodes.
*/ */
HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses, HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
HdslProtos.QueryScope queryScope, String poolName) throws IOException; HddsProtos.QueryScope queryScope, String poolName) throws IOException;
/** /**
* Notify from client when begin or finish creating objects like pipeline * Notify from client when begin or finish creating objects like pipeline
@ -109,8 +110,8 @@ public interface StorageContainerLocationProtocol {
* @param nodePool - optional machine list to build a pipeline. * @param nodePool - optional machine list to build a pipeline.
* @throws IOException * @throws IOException
*/ */
Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
throws IOException; throws IOException;
/** /**
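
A caller sketch against the renamed StorageContainerLocationProtocol: allocateContainer matches the signature above, while the RATIS/THREE enum constants, the wired-up protocol proxy and the owner string are assumptions made for illustration.

// Illustrative only: asking SCM to allocate a three-way replicated Ratis container.
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

public final class AllocateContainerSketch {
  private AllocateContainerSketch() {
  }

  public static Pipeline allocate(StorageContainerLocationProtocol scm, String name)
      throws IOException {
    // Assumed enum constants; the patch only shows the HddsProtos type names.
    return scm.allocateContainer(
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        name,
        "demo-owner");
  }
}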

View File

@ -16,4 +16,4 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.ozone.scm.cli; package org.apache.hadoop.hdds.scm.protocol;

View File

@ -14,31 +14,39 @@
* License for the specific language governing permissions and limitations under * License for the specific language governing permissions and limitations under
* the License. * the License.
*/ */
package org.apache.hadoop.scm.protocolPB; package org.apache.hadoop.hdds.scm.protocolPB;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import com.google.protobuf.RpcController; import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException; import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.AllocateScmBlockRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.AllocateScmBlockResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmKeyBlocksRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmKeyBlocksResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.GetScmBlockLocationsRequestProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.GetScmBlockLocationsResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.KeyBlocks;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmLocatedBlockProto;
import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.GetScmBlockLocationsRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.GetScmBlockLocationsResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.ScmLocatedBlockProto;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.scm.ScmInfo;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
@ -117,7 +125,7 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
*/ */
@Override @Override
public AllocatedBlock allocateBlock(long size, public AllocatedBlock allocateBlock(long size,
HdslProtos.ReplicationType type, HdslProtos.ReplicationFactor factor, HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
String owner) throws IOException { String owner) throws IOException {
Preconditions.checkArgument(size > 0, "block size must be greater than 0"); Preconditions.checkArgument(size > 0, "block size must be greater than 0");
@ -181,9 +189,9 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
*/ */
@Override @Override
public ScmInfo getScmInfo() throws IOException { public ScmInfo getScmInfo() throws IOException {
HdslProtos.GetScmInfoRequestProto request = HddsProtos.GetScmInfoRequestProto request =
HdslProtos.GetScmInfoRequestProto.getDefaultInstance(); HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
HdslProtos.GetScmInfoRespsonseProto resp; HddsProtos.GetScmInfoRespsonseProto resp;
try { try {
resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request); resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request);
} catch (ServiceException e) { } catch (ServiceException e) {
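
A hedged sketch of block allocation through the renamed ScmBlockLocationProtocol that this translator implements: the allocateBlock signature is from the patch, while the RATIS/THREE constants, the 256 MB block size and the owner string are illustrative assumptions.

// Illustrative only: allocating a block through the renamed block-location protocol.
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

public final class AllocateBlockSketch {
  private AllocateBlockSketch() {
  }

  public static AllocatedBlock allocate(ScmBlockLocationProtocol scmBlockClient)
      throws IOException {
    // Assumed block size; any positive value is accepted by the translator above.
    long blockSize = 256L * 1024 * 1024;
    return scmBlockClient.allocateBlock(
        blockSize,
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        "demo-owner");
  }
}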

View File

@ -15,12 +15,12 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.protocolPB; package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos
.ScmBlockLocationProtocolService; .ScmBlockLocationProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
/** /**
* Protocol used from an HDFS node to StorageContainerManager. This extends the * Protocol used from an HDFS node to StorageContainerManager. This extends the

View File

@ -14,33 +14,45 @@
* License for the specific language governing permissions and limitations under * License for the specific language governing permissions and limitations under
* the License. * the License.
*/ */
package org.apache.hadoop.scm.protocolPB; package org.apache.hadoop.hdds.scm.protocolPB;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.base.Strings; import com.google.common.base.Strings;
import com.google.protobuf.RpcController; import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException; import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.PipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.PipelineResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
import org.apache.hadoop.scm.ScmInfo;
import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
@ -85,8 +97,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
* @throws IOException * @throws IOException
*/ */
@Override @Override
public Pipeline allocateContainer(HdslProtos.ReplicationType type, public Pipeline allocateContainer(HddsProtos.ReplicationType type,
HdslProtos.ReplicationFactor factor, String HddsProtos.ReplicationFactor factor, String
containerName, String owner) throws IOException { containerName, String owner) throws IOException {
Preconditions.checkNotNull(containerName, "Container Name cannot be Null"); Preconditions.checkNotNull(containerName, "Container Name cannot be Null");
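As a usage sketch (not part of this patch), a caller of the renamed translator now passes the HddsProtos enums straight through; the 'client' reference, container name, and owner below are illustrative assumptions:

    // Illustrative only: 'client' is any StorageContainerLocationProtocol
    // implementation, e.g. this client-side translator.
    Pipeline pipeline = client.allocateContainer(
        HddsProtos.ReplicationType.STAND_ALONE,   // assumed enum constant
        HddsProtos.ReplicationFactor.ONE,         // assumed enum constant
        "demo-container", "demo-owner");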
@ -151,7 +163,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
SCMListContainerResponseProto response = SCMListContainerResponseProto response =
rpcProxy.listContainer(NULL_RPC_CONTROLLER, request); rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
List<ContainerInfo> containerList = new ArrayList<>(); List<ContainerInfo> containerList = new ArrayList<>();
for (HdslProtos.SCMContainerInfo containerInfoProto : response for (HddsProtos.SCMContainerInfo containerInfoProto : response
.getContainersList()) { .getContainersList()) {
containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
} }
@ -191,8 +203,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
* @return List of Datanodes. * @return List of Datanodes.
*/ */
@Override @Override
public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
nodeStatuses, HdslProtos.QueryScope queryScope, String poolName) nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
throws IOException { throws IOException {
// TODO : We support only cluster wide query right now. So ignoring checking // TODO : We support only cluster wide query right now. So ignoring checking
// queryScope and poolName // queryScope and poolName
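A hedged sketch of the renamed queryNode signature; the node state, scope, and empty pool name are assumptions chosen for illustration (the method currently ignores scope and pool, per the TODO above):

    // Illustrative only: ask SCM for all healthy datanodes, cluster-wide.
    HddsProtos.NodePool healthy = client.queryNode(
        EnumSet.of(HddsProtos.NodeState.HEALTHY),  // assumed enum constant
        HddsProtos.QueryScope.CLUSTER, "");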
@ -248,8 +260,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
* @throws IOException * @throws IOException
*/ */
@Override @Override
public Pipeline createReplicationPipeline(HdslProtos.ReplicationType public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
replicationType, HdslProtos.ReplicationFactor factor, HdslProtos replicationType, HddsProtos.ReplicationFactor factor, HddsProtos
.NodePool nodePool) throws IOException { .NodePool nodePool) throws IOException {
PipelineRequestProto request = PipelineRequestProto.newBuilder() PipelineRequestProto request = PipelineRequestProto.newBuilder()
.setNodePool(nodePool) .setNodePool(nodePool)
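A minimal sketch of the renamed createReplicationPipeline call; the replication settings and the empty NodePool are assumptions, used here only to show the HddsProtos types involved:

    // Illustrative only: an empty NodePool proto; whether SCM then picks
    // the datanodes itself is an assumption, not verified by this patch.
    Pipeline pipeline = client.createReplicationPipeline(
        HddsProtos.ReplicationType.RATIS,        // assumed enum constant
        HddsProtos.ReplicationFactor.THREE,      // assumed enum constant
        HddsProtos.NodePool.getDefaultInstance());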
@ -277,10 +289,10 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
@Override @Override
public ScmInfo getScmInfo() throws IOException { public ScmInfo getScmInfo() throws IOException {
HdslProtos.GetScmInfoRequestProto request = HddsProtos.GetScmInfoRequestProto request =
HdslProtos.GetScmInfoRequestProto.getDefaultInstance(); HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
try { try {
HdslProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo( HddsProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo(
NULL_RPC_CONTROLLER, request); NULL_RPC_CONTROLLER, request);
ScmInfo.Builder builder = new ScmInfo.Builder() ScmInfo.Builder builder = new ScmInfo.Builder()
.setClusterId(resp.getClusterId()) .setClusterId(resp.getClusterId())
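A minimal usage sketch for getScmInfo() after the rename; getClusterId() follows from the builder call shown above, while getScmId() is an assumed companion getter:

    // Illustrative only: read the identifiers carried by ScmInfo.
    ScmInfo info = client.getScmInfo();
    String clusterId = info.getClusterId();  // mirrors setClusterId(...) above
    String scmId = info.getScmId();          // assumed getter, not shown in this hunk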

View File

@ -15,11 +15,13 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.protocolPB; package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos
.StorageContainerLocationProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService;
/** /**
* Protocol used from an HDFS node to StorageContainerManager. This extends the * Protocol used from an HDFS node to StorageContainerManager. This extends the

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.protocolPB; package org.apache.hadoop.hdds.scm.protocolPB;
/** /**
* This package contains classes for the client of the storage container * This package contains classes for the client of the storage container

View File

@ -16,44 +16,43 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.storage; package org.apache.hadoop.hdds.scm.storage;
import com.google.protobuf.ByteString; import com.google.protobuf.ByteString;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo; import org.apache.hadoop.hdds.scm.container.common.helpers
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos .StorageContainerException;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandRequestProto; .ContainerCommandRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto; .ContainerCommandResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
.GetKeyRequestProto; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.GetKeyResponseProto; .GetKeyResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.GetSmallFileRequestProto; .GetSmallFileRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.GetSmallFileResponseProto; .GetSmallFileResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
.PutKeyRequestProto; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.PutSmallFileRequestProto; .PutSmallFileRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadChunkRequestProto; .ReadChunkRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ReadChunkResponseProto; .ReadChunkResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.WriteChunkRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.ReadContainerResponseProto;
import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
.ReadContainerRequestProto; .ReadContainerRequestProto;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; .ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.WriteChunkRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.scm.XceiverClientSpi;
/** /**
* Implementation of all container protocol calls performed by Container * Implementation of all container protocol calls performed by Container
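For code that calls into this class (assumed to be ContainerProtocolCalls, per the class comment above), the practical effect of the hunk is an import update; a hedged sketch of the new package names:

    // Illustrative only: imports after the hdsl -> hdds rename.
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
    import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyResponseProto;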

View File

@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.scm.storage; package org.apache.hadoop.hdds.scm.storage;
/** /**
* This package contains StorageContainerManager classes. * This package contains StorageContainerManager classes.

View File

@ -20,10 +20,9 @@ package org.apache.hadoop.ozone;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ozone.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.ozone.client.ReplicationType; import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.scm.ScmConfigKeys;
/** /**
* This class contains constants for configuration keys used in Ozone. * This class contains constants for configuration keys used in Ozone.
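Downstream of this hunk, code that works with the Ozone replication defaults resolves the enums from the hdds client package; a small hedged sketch (the specific enum constants are assumptions):

    // Illustrative only: the client-facing enums now live under
    // org.apache.hadoop.hdds.client instead of org.apache.hadoop.ozone.client.
    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;

    ReplicationType type = ReplicationType.RATIS;        // assumed constant
    ReplicationFactor factor = ReplicationFactor.THREE;  // assumed constant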

View File

@ -17,8 +17,8 @@
package org.apache.hadoop.ozone.common; package org.apache.hadoop.ozone.common;
import org.apache.hadoop.hdsl.protocol.proto import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmBlockLocationProtocolProtos.KeyBlocks; .KeyBlocks;
import java.util.List; import java.util.List;

View File

@ -17,9 +17,11 @@
*/ */
package org.apache.hadoop.ozone.common; package org.apache.hadoop.ozone.common;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult; import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult; .DeleteScmBlockResult;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.DeleteScmBlockResult.Result;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;

View File

@ -17,12 +17,12 @@
*/ */
package org.apache.hadoop.ozone.common; package org.apache.hadoop.ozone.common;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import java.io.File;
import java.io.IOException;
/** /**
* The exception is thrown when file system state is inconsistent * The exception is thrown when file system state is inconsistent
* and is not recoverable. * and is not recoverable.

View File

@ -17,6 +17,13 @@
*/ */
package org.apache.hadoop.ozone.common; package org.apache.hadoop.ozone.common;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.nio.file.DirectoryStream; import java.nio.file.DirectoryStream;
@ -24,14 +31,6 @@ import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.util.Properties; import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType;
import org.apache.hadoop.util.Time;
/** /**
* Storage information file. This Class defines the methods to check * Storage information file. This Class defines the methods to check
* the consistency of the storage dir and the version file. * the consistency of the storage dir and the version file.

View File

@ -17,18 +17,17 @@
*/ */
package org.apache.hadoop.ozone.common; package org.apache.hadoop.ozone.common;
import java.io.IOException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.File;
import java.io.RandomAccessFile;
import java.util.Properties;
import java.util.UUID;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Properties;
import java.util.UUID;
/** /**
* Common class for storage information. This class defines the common * Common class for storage information. This class defines the common

Some files were not shown because too many files have changed in this diff.