HDDS-1382. Create customized CSI server for Ozone

Closes #693
This commit is contained in:
Márton Elek 2019-05-31 13:35:58 +02:00
parent 52128e352a
commit 1ae062c818
No known key found for this signature in database
GPG Key ID: D51EA8F00EE79B28
16 changed files with 2141 additions and 2 deletions

View File

@ -226,7 +226,12 @@ public final class HddsUtils {
if ((value == null) || value.isEmpty()) { if ((value == null) || value.isEmpty()) {
return Optional.empty(); return Optional.empty();
} }
return Optional.of(HostAndPort.fromString(value).getHostText()); String hostname = value.replaceAll("\\:[0-9]+$", "");
if (hostname.length() == 0) {
return Optional.empty();
} else {
return Optional.of(hostname);
}
} }
/** /**

View File

@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds;
import java.util.Optional;
import org.junit.Assert;
import org.junit.Test;
/**
 * Testing HddsUtils.
 */
public class TestHddsUtils {

  /**
   * {@link HddsUtils#getHostName} should strip a trailing :port suffix and
   * return an empty Optional when no usable host name remains.
   */
  @Test
  public void testGetHostName() {
    // Host with port: the port suffix is stripped.
    Assert.assertEquals(Optional.of("localhost"),
        HddsUtils.getHostName("localhost:1234"));

    // Plain host without a port is returned unchanged.
    Assert.assertEquals(Optional.of("localhost"),
        HddsUtils.getHostName("localhost"));

    // Port-only value leaves no host name behind.
    Assert.assertEquals(Optional.empty(),
        HddsUtils.getHostName(":1234"));

    // Empty input is explicitly handled by the implementation.
    Assert.assertEquals(Optional.empty(),
        HddsUtils.getHostName(""));
  }
}

View File

@ -46,6 +46,7 @@ function hadoop_usage
hadoop_add_subcommand "om" daemon "Ozone Manager" hadoop_add_subcommand "om" daemon "Ozone Manager"
hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway" hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway"
hadoop_add_subcommand "csi" daemon "run the standalone CSI daemon"
hadoop_add_subcommand "recon" daemon "run the Recon service" hadoop_add_subcommand "recon" daemon "run the Recon service"
hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager" hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
hadoop_add_subcommand "sh" client "command line interface for object store operations" hadoop_add_subcommand "sh" client "command line interface for object store operations"
@ -154,6 +155,11 @@ function ozonecmd_case
HADOOP_CLASSNAME='org.apache.hadoop.ozone.s3.Gateway' HADOOP_CLASSNAME='org.apache.hadoop.ozone.s3.Gateway'
OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-s3gateway" OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-s3gateway"
;; ;;
csi)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.ozone.csi.CsiServer'
OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-csi"
;;
recon) recon)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.ozone.recon.ReconServer' HADOOP_CLASSNAME='org.apache.hadoop.ozone.recon.ReconServer'

View File

@ -0,0 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
<!-- The csi.v1 package contains only protoc/grpc-java generated sources;
     findbugs findings there are not actionable, so the whole package is
     excluded from analysis. -->
<Match>
<Package name="csi.v1"/>
</Match>
</FindBugsFilter>

169
hadoop-ozone/csi/pom.xml Normal file
View File

@ -0,0 +1,169 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone</artifactId>
<version>0.5.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-ozone-csi</artifactId>
<version>0.5.0-SNAPSHOT</version>
<description>Apache Hadoop Ozone CSI service</description>
<name>Apache Hadoop Ozone CSI service</name>
<packaging>jar</packaging>
<properties>
<grpc.version>1.17.1</grpc.version>
</properties>
<dependencies>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java-util</artifactId>
<version>3.5.1</version>
<exclusions>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-config</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>26.0-android</version>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>3.5.1</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-netty</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-epoll</artifactId>
<version>4.1.30.Final</version>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-protobuf</artifactId>
<version>${grpc.version}</version>
<exclusions>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.grpc</groupId>
<artifactId>grpc-stub</artifactId>
<version>${grpc.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-client</artifactId>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
<extensions>
<extension>
<groupId>kr.motd.maven</groupId>
<artifactId>os-maven-plugin</artifactId>
<version>${os-maven-plugin.version}</version>
</extension>
</extensions>
<plugins>
<plugin>
<groupId>org.xolstice.maven.plugins</groupId>
<artifactId>protobuf-maven-plugin</artifactId>
<version>${protobuf-maven-plugin.version}</version>
<extensions>true</extensions>
<configuration>
<protocArtifact>
com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier}
</protocArtifact>
<protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
<includes>
<include>csi.proto</include>
</includes>
<outputDirectory>target/generated-sources/java</outputDirectory>
<clearOutputDirectory>false</clearOutputDirectory>
</configuration>
<executions>
<execution>
<id>compile-protoc</id>
<goals>
<goal>compile</goal>
<goal>test-compile</goal>
<goal>compile-custom</goal>
<goal>test-compile-custom</goal>
</goals>
<configuration>
<pluginId>grpc-java</pluginId>
<pluginArtifact>
io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
</pluginArtifact>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>depcheck</id>
<phase></phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>findbugs-maven-plugin</artifactId>
<configuration>
<excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
</excludeFilterFile>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,123 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.csi;
import java.io.IOException;
import org.apache.hadoop.ozone.client.OzoneClient;
import csi.v1.ControllerGrpc.ControllerImplBase;
import csi.v1.Csi.CapacityRange;
import csi.v1.Csi.ControllerGetCapabilitiesRequest;
import csi.v1.Csi.ControllerGetCapabilitiesResponse;
import csi.v1.Csi.ControllerServiceCapability;
import csi.v1.Csi.ControllerServiceCapability.RPC;
import csi.v1.Csi.ControllerServiceCapability.RPC.Type;
import csi.v1.Csi.CreateVolumeRequest;
import csi.v1.Csi.CreateVolumeResponse;
import csi.v1.Csi.DeleteVolumeRequest;
import csi.v1.Csi.DeleteVolumeResponse;
import csi.v1.Csi.Volume;
import io.grpc.stub.StreamObserver;
/**
 * CSI controller service.
 * <p>
 * This service usually runs only once and is responsible for the creation
 * and deletion of volumes. Each CSI volume is backed by an Ozone S3 bucket;
 * the CSI volume id is the bucket name.
 */
public class ControllerService extends ControllerImplBase {

  /** Hadoop user which owns all the created buckets. */
  private final String volumeOwner;

  /** Fallback size (bytes) when a request carries no capacity range. */
  private final long defaultVolumeSize;

  /** Shared client towards the Ozone cluster. */
  private final OzoneClient ozoneClient;

  public ControllerService(OzoneClient ozoneClient, long volumeSize,
      String volumeOwner) {
    this.volumeOwner = volumeOwner;
    this.defaultVolumeSize = volumeSize;
    this.ozoneClient = ozoneClient;
  }

  @Override
  public void createVolume(CreateVolumeRequest request,
      StreamObserver<CreateVolumeResponse> responseObserver) {
    try {
      // The bucket is created on behalf of the configured owner, not the
      // (unauthenticated) CSI caller.
      ozoneClient.getObjectStore()
          .createS3Bucket(volumeOwner, request.getName());

      long size = findSize(request.getCapacityRange());

      CreateVolumeResponse response = CreateVolumeResponse.newBuilder()
          .setVolume(Volume.newBuilder()
              .setVolumeId(request.getName())
              .setCapacityBytes(size))
          .build();

      responseObserver.onNext(response);
      responseObserver.onCompleted();
    } catch (IOException e) {
      responseObserver.onError(e);
    }
  }

  /**
   * Resolve the effective volume size from the requested capacity range.
   * <p>
   * Precedence: required bytes, then the smaller of the configured default
   * and the limit, then the configured default (~1 GB by default).
   *
   * @param capacityRange capacity range of the CreateVolume request; unset
   *                      fields are reported as 0 by protobuf.
   * @return the size in bytes to report for the new volume
   */
  private long findSize(CapacityRange capacityRange) {
    if (capacityRange.getRequiredBytes() != 0) {
      return capacityRange.getRequiredBytes();
    }
    if (capacityRange.getLimitBytes() != 0) {
      // Never promise more than the caller's limit.
      return Math.min(defaultVolumeSize, capacityRange.getLimitBytes());
    }
    // Neither bound was set: fall back to the configured default.
    return defaultVolumeSize;
  }

  @Override
  public void deleteVolume(DeleteVolumeRequest request,
      StreamObserver<DeleteVolumeResponse> responseObserver) {
    try {
      // Volume id == bucket name (see createVolume).
      ozoneClient.getObjectStore().deleteS3Bucket(request.getVolumeId());

      DeleteVolumeResponse response = DeleteVolumeResponse.newBuilder()
          .build();

      responseObserver.onNext(response);
      responseObserver.onCompleted();
    } catch (IOException e) {
      responseObserver.onError(e);
    }
  }

  @Override
  public void controllerGetCapabilities(
      ControllerGetCapabilitiesRequest request,
      StreamObserver<ControllerGetCapabilitiesResponse> responseObserver) {
    // Only volume creation/deletion is supported by this controller.
    ControllerGetCapabilitiesResponse response =
        ControllerGetCapabilitiesResponse.newBuilder()
            .addCapabilities(
                ControllerServiceCapability.newBuilder().setRpc(
                    RPC.newBuilder().setType(Type.CREATE_DELETE_VOLUME)))
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
  }
}

View File

@ -0,0 +1,160 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.csi;
import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.util.StringUtils;
import io.grpc.Server;
import io.grpc.netty.NettyServerBuilder;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerDomainSocketChannel;
import io.netty.channel.unix.DomainSocketAddress;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
/**
 * CLI entrypoint of the CSI service daemon.
 * <p>
 * Starts a gRPC server on a unix domain socket and registers the three CSI
 * services (identity, controller, node).
 */
@Command(name = "ozone csi",
    hidden = true, description = "CSI service daemon.",
    versionProvider = HddsVersionProvider.class,
    mixinStandardHelpOptions = true)
public class CsiServer extends GenericCli implements Callable<Void> {

  private static final Logger LOG = LoggerFactory.getLogger(CsiServer.class);

  @Override
  public Void call() throws Exception {
    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
    CsiConfig csiConfig = ozoneConfiguration.getObject(CsiConfig.class);

    // Validate the configuration before any resource (RPC client, event
    // loop group) is allocated, so misconfiguration fails fast and clean.
    if (csiConfig.getVolumeOwner().isEmpty()) {
      throw new IllegalArgumentException(
          "ozone.csi.owner is not set. You should set this configuration "
              + "variable to define which user should own all the created "
              + "buckets.");
    }

    OzoneClient rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration);

    // Epoll transport is required: the CSI endpoint is a unix domain socket.
    EpollEventLoopGroup group = new EpollEventLoopGroup();

    Server server =
        NettyServerBuilder
            .forAddress(new DomainSocketAddress(csiConfig.getSocketPath()))
            .channelType(EpollServerDomainSocketChannel.class)
            .workerEventLoopGroup(group)
            .bossEventLoopGroup(group)
            .addService(new IdentitiyService())
            .addService(new ControllerService(rpcClient,
                csiConfig.getDefaultVolumeSize(), csiConfig.getVolumeOwner()))
            .addService(new NodeService(csiConfig))
            .build();

    server.start();
    // Blocks until the process is terminated externally.
    server.awaitTermination();
    rpcClient.close();
    return null;
  }

  public static void main(String[] args) {
    StringUtils.startupShutdownMessage(CsiServer.class, args, LOG);
    new CsiServer().run(args);
  }

  /**
   * Configuration settings specific to the CSI server.
   */
  @ConfigGroup(prefix = "ozone.csi")
  public static class CsiConfig {

    // Unix domain socket path of the CSI endpoint.
    private String socketPath;

    // Size (bytes) used when a CreateVolume request has no capacity range.
    private long defaultVolumeSize;

    // S3 gateway endpoint used by the node service to mount the buckets.
    private String s3gAddress;

    // Owner of all the buckets created via the CSI interface.
    private String volumeOwner;

    public String getSocketPath() {
      return socketPath;
    }

    public String getVolumeOwner() {
      return volumeOwner;
    }

    @Config(key = "owner",
        defaultValue = "",
        description =
            "This is the username which is used to create the requested "
                + "storage. Used as a hadoop username and the generated ozone"
                + " volume used to store all the buckets. WARNING: It can "
                + "be a security hole to use CSI in a secure environments as "
                + "ALL the users can request the mount of a specific bucket "
                + "via the CSI interface.",
        tags = ConfigTag.STORAGE)
    public void setVolumeOwner(String volumeOwner) {
      this.volumeOwner = volumeOwner;
    }

    @Config(key = "socket",
        defaultValue = "/var/lib/csi.sock",
        description =
            "The socket where all the CSI services will listen (file name).",
        tags = ConfigTag.STORAGE)
    public void setSocketPath(String socketPath) {
      this.socketPath = socketPath;
    }

    public long getDefaultVolumeSize() {
      return defaultVolumeSize;
    }

    @Config(key = "default-volume-size",
        defaultValue = "1000000000",
        description =
            "The default size of the created volumes (if not specified in "
                + "the requests).",
        tags = ConfigTag.STORAGE)
    public void setDefaultVolumeSize(long defaultVolumeSize) {
      this.defaultVolumeSize = defaultVolumeSize;
    }

    public String getS3gAddress() {
      return s3gAddress;
    }

    @Config(key = "s3g.address",
        defaultValue = "http://localhost:9878",
        description =
            "The address of the S3 gateway endpoint which is used by the "
                + "node service to mount the created buckets.",
        tags = ConfigTag.STORAGE)
    public void setS3gAddress(String s3gAddress) {
      this.s3gAddress = s3gAddress;
    }
  }
}

View File

@ -0,0 +1,72 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.csi;
import org.apache.hadoop.ozone.util.OzoneVersionInfo;
import com.google.protobuf.BoolValue;
import csi.v1.Csi.GetPluginCapabilitiesResponse;
import csi.v1.Csi.GetPluginInfoResponse;
import csi.v1.Csi.PluginCapability;
import csi.v1.Csi.PluginCapability.Service;
import static csi.v1.Csi.PluginCapability.Service.Type.CONTROLLER_SERVICE;
import csi.v1.Csi.ProbeResponse;
import csi.v1.IdentityGrpc.IdentityImplBase;
import io.grpc.stub.StreamObserver;
/**
* Implementation of the CSI identity service.
*/
public class IdentitiyService extends IdentityImplBase {
@Override
public void getPluginInfo(csi.v1.Csi.GetPluginInfoRequest request,
StreamObserver<csi.v1.Csi.GetPluginInfoResponse> responseObserver) {
GetPluginInfoResponse response = GetPluginInfoResponse.newBuilder()
.setName("org.apache.hadoop.ozone")
.setVendorVersion(OzoneVersionInfo.OZONE_VERSION_INFO.getVersion())
.build();
responseObserver.onNext(response);
responseObserver.onCompleted();
}
@Override
public void getPluginCapabilities(
csi.v1.Csi.GetPluginCapabilitiesRequest request,
StreamObserver<GetPluginCapabilitiesResponse> responseObserver) {
GetPluginCapabilitiesResponse response =
GetPluginCapabilitiesResponse.newBuilder()
.addCapabilities(PluginCapability.newBuilder().setService(
Service.newBuilder().setType(CONTROLLER_SERVICE)))
.build();
responseObserver.onNext(response);
responseObserver.onCompleted();
}
@Override
public void probe(csi.v1.Csi.ProbeRequest request,
StreamObserver<csi.v1.Csi.ProbeResponse> responseObserver) {
ProbeResponse response = ProbeResponse.newBuilder()
.setReady(BoolValue.of(true))
.build();
responseObserver.onNext(response);
responseObserver.onCompleted();
}
}

View File

@ -0,0 +1,142 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.csi;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.ozone.csi.CsiServer.CsiConfig;
import csi.v1.Csi.NodeGetCapabilitiesRequest;
import csi.v1.Csi.NodeGetCapabilitiesResponse;
import csi.v1.Csi.NodeGetInfoRequest;
import csi.v1.Csi.NodeGetInfoResponse;
import csi.v1.Csi.NodePublishVolumeRequest;
import csi.v1.Csi.NodePublishVolumeResponse;
import csi.v1.Csi.NodeUnpublishVolumeRequest;
import csi.v1.Csi.NodeUnpublishVolumeResponse;
import csi.v1.NodeGrpc.NodeImplBase;
import io.grpc.stub.StreamObserver;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of the CSI node service.
 * <p>
 * Publishes a volume by mounting the backing S3 bucket with goofys (through
 * the configured S3 gateway) and unpublishes it with {@code fusermount -u}.
 */
public class NodeService extends NodeImplBase {

  private static final Logger LOG = LoggerFactory.getLogger(NodeService.class);

  /** Upper bound for the external mount/umount commands. */
  private static final long COMMAND_TIMEOUT_SECONDS = 10;

  /** Address of the S3 gateway used as the goofys endpoint. */
  private final String s3Endpoint;

  public NodeService(CsiConfig configuration) {
    this.s3Endpoint = configuration.getS3gAddress();
  }

  @Override
  public void nodePublishVolume(NodePublishVolumeRequest request,
      StreamObserver<NodePublishVolumeResponse> responseObserver) {
    try {
      Files.createDirectories(Paths.get(request.getTargetPath()));
      // NOTE(review): Runtime.exec(String) tokenizes on whitespace only; a
      // volume id or target path containing spaces would break the command.
      // Consider ProcessBuilder with an explicit argument list.
      String mountCommand =
          String.format("goofys --endpoint %s %s %s",
              s3Endpoint,
              request.getVolumeId(),
              request.getTargetPath());
      LOG.info("Executing {}", mountCommand);

      executeCommand(mountCommand);

      responseObserver.onNext(NodePublishVolumeResponse.newBuilder()
          .build());
      responseObserver.onCompleted();

    } catch (Exception e) {
      responseObserver.onError(e);
    }
  }

  /**
   * Run an external command, logging its output.
   *
   * @param command the full command line (split on whitespace by
   *                {@link Runtime#exec(String)})
   * @throws IOException          if the process cannot be started or its
   *                              output cannot be read
   * @throws InterruptedException if interrupted while waiting
   * @throws RuntimeException     if the command times out or exits non-zero
   */
  private void executeCommand(String command)
      throws IOException, InterruptedException {
    Process exec = Runtime.getRuntime().exec(command);
    // The result of waitFor must be checked: on timeout the process is
    // still alive and exitValue() would throw IllegalThreadStateException.
    if (!exec.waitFor(COMMAND_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
      exec.destroyForcibly();
      throw new RuntimeException(String.format(
          "Command %s did not complete in %d seconds", command,
          COMMAND_TIMEOUT_SECONDS));
    }
    LOG.info("Command is executed with stdout: {}, stderr: {}",
        IOUtils.toString(exec.getInputStream(), "UTF-8"),
        IOUtils.toString(exec.getErrorStream(), "UTF-8"));
    if (exec.exitValue() != 0) {
      throw new RuntimeException(String
          .format("Return code of the command %s was %d", command,
              exec.exitValue()));
    }
  }

  @Override
  public void nodeUnpublishVolume(NodeUnpublishVolumeRequest request,
      StreamObserver<NodeUnpublishVolumeResponse> responseObserver) {
    // goofys is a FUSE filesystem, so a plain fusermount -u detaches it.
    String umountCommand =
        String.format("fusermount -u %s", request.getTargetPath());
    LOG.info("Executing {}", umountCommand);

    try {
      executeCommand(umountCommand);

      responseObserver.onNext(NodeUnpublishVolumeResponse.newBuilder()
          .build());
      responseObserver.onCompleted();

    } catch (Exception e) {
      responseObserver.onError(e);
    }
  }

  @Override
  public void nodeGetCapabilities(NodeGetCapabilitiesRequest request,
      StreamObserver<NodeGetCapabilitiesResponse> responseObserver) {
    // No optional node capabilities are implemented.
    NodeGetCapabilitiesResponse response =
        NodeGetCapabilitiesResponse.newBuilder()
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
  }

  @Override
  public void nodeGetInfo(NodeGetInfoRequest request,
      StreamObserver<NodeGetInfoResponse> responseObserver) {
    try {
      // The local host name identifies this node towards the CO.
      NodeGetInfoResponse response = NodeGetInfoResponse.newBuilder()
          .setNodeId(InetAddress.getLocalHost().getHostName())
          .build();
      responseObserver.onNext(response);
      responseObserver.onCompleted();
    } catch (UnknownHostException e) {
      responseObserver.onError(e);
    }
  }
}

View File

@ -0,0 +1,22 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.csi;
/**
* Container Storage Interface server implementation for Ozone.
*/

File diff suppressed because it is too large Load Diff

View File

@ -68,6 +68,13 @@
<classifier>classpath</classifier> <classifier>classpath</classifier>
<destFileName>hadoop-ozone-s3gateway.classpath</destFileName> <destFileName>hadoop-ozone-s3gateway.classpath</destFileName>
</artifactItem> </artifactItem>
<artifactItem>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-csi</artifactId>
<version>${ozone.version}</version>
<classifier>classpath</classifier>
<destFileName>hadoop-ozone-csi.classpath</destFileName>
</artifactItem>
<artifactItem> <artifactItem>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-ozone-manager</artifactId> <artifactId>hadoop-ozone-ozone-manager</artifactId>
@ -133,6 +140,29 @@
<includeScope>runtime</includeScope> <includeScope>runtime</includeScope>
</configuration> </configuration>
</execution> </execution>
<execution>
<id>copy-omitted-jars</id>
<phase>prepare-package</phase>
<goals>
<goal>copy</goal>
</goals>
<configuration>
<outputDirectory>target/ozone-${ozone.version}/share/ozone/lib
</outputDirectory>
<artifactItems>
<artifactItem>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>3.5.1</version>
</artifactItem>
<artifactItem>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>26.0-android</version>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions> </executions>
</plugin> </plugin>
<plugin> <plugin>
@ -247,6 +277,10 @@
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-s3gateway</artifactId> <artifactId>hadoop-ozone-s3gateway</artifactId>
</dependency> </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-csi</artifactId>
</dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-ozone-manager</artifactId> <artifactId>hadoop-ozone-ozone-manager</artifactId>

View File

@ -19,3 +19,7 @@ FROM apache/hadoop-runner:jdk11
ADD --chown=hadoop . /opt/hadoop ADD --chown=hadoop . /opt/hadoop
WORKDIR /opt/hadoop WORKDIR /opt/hadoop
# goofys provides the FUSE-based S3 mount used by the CSI NodeService.
# NOTE(review): the binary is fetched from a third-party mirror
# (os.anzix.net), not an official goofys release — verify provenance and
# pin a checksum before relying on this image.
RUN sudo wget https://os.anzix.net/goofys -O /usr/bin/goofys
RUN sudo chmod 755 /usr/bin/goofys
# fuse is required by goofys and for fusermount -u during unpublish.
RUN sudo yum install -y fuse

View File

@ -56,6 +56,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-s3gateway</artifactId> <artifactId>hadoop-ozone-s3gateway</artifactId>
</dependency> </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-csi</artifactId>
</dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-recon</artifactId> <artifactId>hadoop-ozone-recon</artifactId>

View File

@ -52,4 +52,4 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
configurationPropsToSkipCompare.add(OzoneConfigKeys. configurationPropsToSkipCompare.add(OzoneConfigKeys.
OZONE_S3_TOKEN_MAX_LIFETIME_KEY); OZONE_S3_TOKEN_MAX_LIFETIME_KEY);
} }
} }

View File

@ -52,6 +52,7 @@
<module>ozone-recon</module> <module>ozone-recon</module>
<module>ozone-recon-codegen</module> <module>ozone-recon-codegen</module>
<module>upgrade</module> <module>upgrade</module>
<module>csi</module>
</modules> </modules>
<repositories> <repositories>
@ -89,6 +90,11 @@
<artifactId>hadoop-ozone-s3gateway</artifactId> <artifactId>hadoop-ozone-s3gateway</artifactId>
<version>${ozone.version}</version> <version>${ozone.version}</version>
</dependency> </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-csi</artifactId>
<version>${ozone.version}</version>
</dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-datanode</artifactId> <artifactId>hadoop-ozone-datanode</artifactId>
@ -114,6 +120,11 @@
<artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId> <artifactId>hadoop-ozone-filesystem-lib-legacy</artifactId>
<version>${ozone.version}</version> <version>${ozone.version}</version>
</dependency> </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-config</artifactId>
<version>${hdds.version}</version>
</dependency>
<dependency> <dependency>
<groupId>org.apache.hadoop</groupId> <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-integration-test</artifactId> <artifactId>hadoop-ozone-integration-test</artifactId>