diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmVolumeArgs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmVolumeArgs.java new file mode 100644 index 00000000000..359c2d540da --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ksm/helpers/KsmVolumeArgs.java @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ksm.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.KeyValue;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+
+/**
+ * A class that encapsulates the KsmVolumeArgs Args.
+ */
+public final class KsmVolumeArgs {
+ private final String adminName;
+ private final String ownerName;
+ private final String volume;
+ private final long quotaInBytes;
+ private final Map
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ksm.protocolPB;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
+import org.apache.hadoop.ksm.protocol.KeyspaceManagerProtocol;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.Status;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * The client side implementation of KeyspaceManagerProtocol.
+ */
+
+@InterfaceAudience.Private
+public final class KeySpaceManagerProtocolClientSideTranslatorPB
+ implements KeyspaceManagerProtocol, ProtocolTranslator, Closeable {
+
+ /**
+ * RpcController is not used and hence is set to null.
+ */
+ private static final RpcController NULL_RPC_CONTROLLER = null;
+
+ private final KeySpaceManagerProtocolPB rpcProxy;
+
+ /**
+ * Constructor for KeySpaceManger Client.
+ * @param rpcProxy
+ */
+ public KeySpaceManagerProtocolClientSideTranslatorPB(
+ KeySpaceManagerProtocolPB rpcProxy) {
+ this.rpcProxy = rpcProxy;
+ }
+
+ /**
+ * Closes this stream and releases any system resources associated
+ * with it. If the stream is already closed then invoking this
+ * method has no effect.
+ *
+ * As noted in {@link AutoCloseable#close()}, cases where the
+ * close may fail require careful attention. It is strongly advised
+ * to relinquish the underlying resources and to internally
+ * mark the {@code Closeable} as closed, prior to throwing
+ * the {@code IOException}.
+ *
+ * @throws IOException if an I/O error occurs
+ */
+ @Override
+ public void close() throws IOException {
+
+ }
+
+  /**
+   * Creates a volume by sending a CreateVolumeRequest to the KSM server.
+   *
+   * @param args - Arguments to create Volume.
+   * @throws IOException if the RPC transport fails or the server returns a
+   *         non-OK status.
+   */
+  @Override
+  public void createVolume(KsmVolumeArgs args) throws IOException {
+    CreateVolumeRequest.Builder req =
+        CreateVolumeRequest.newBuilder();
+    VolumeInfo volumeInfo = args.getProtobuf();
+    req.setVolumeInfo(volumeInfo);
+
+    final CreateVolumeResponse resp;
+    try {
+      resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() != Status.OK) {
+      // Fix: the original message ran the text and status together
+      // ("...failed errorVOLUME_ALREADY_EXISTS").
+      throw new IOException("Volume creation failed, error:"
+          + resp.getStatus());
+    }
+  }
+
+ /**
+ * Changes the owner of a volume.
+ *
+ * @param volume - Name of the volume.
+ * @param owner - Name of the owner.
+ * @throws IOException
+ */
+ @Override
+ public void setOwner(String volume, String owner) throws IOException {
+
+ }
+
+ /**
+ * Changes the Quota on a volume.
+ *
+ * @param volume - Name of the volume.
+ * @param quota - Quota in bytes.
+ * @throws IOException
+ */
+ @Override
+ public void setQuota(String volume, long quota) throws IOException {
+
+ }
+
+ /**
+ * Checks if the specified user can access this volume.
+ *
+ * @param volume - volume
+ * @param userName - user name
+ * @throws IOException
+ */
+ @Override
+ public void checkVolumeAccess(String volume, String userName) throws
+ IOException {
+
+ }
+
+  /**
+   * Gets the volume information.
+   *
+   * @param volume - Volume name.
+   * @return KsmVolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    // TODO(review): unimplemented stub -- currently always returns null,
+    // which callers may not expect; implement or throw until supported.
+    return null;
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    // TODO(review): unimplemented stub -- silently does nothing for now.
+  }
+
+ /**
+ * Lists volume owned by a specific user.
+ *
+ * @param userName - user name
+ * @param prefix - Filter prefix -- Return only entries that match this.
+ * @param prevKey - Previous key -- List starts from the next from the
+ * prevkey
+ * @param maxKeys - Max number of keys to return.
+ * @return List of Volumes.
+ * @throws IOException
+ */
+ @Override
+ public List
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.ksm;
+
+import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
+
+import java.io.IOException;
+
+/**
+ * KSM volume manager interface.
+ */
+public interface VolumeManager {
+  /**
+   * Start volume manager.
+   */
+  void start();
+
+  /**
+   * Stop volume manager.
+   * @throws IOException if releasing resources (e.g. closing the metadata
+   *         store) fails.
+   */
+  void stop() throws IOException;
+
+  /**
+   * Create a new volume.
+   * @param args - Volume args to create a volume
+   * @throws IOException if the volume already exists, the owner has too
+   *         many volumes, or the metadata update fails.
+   */
+  void createVolume(KsmVolumeArgs args) throws IOException;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
new file mode 100644
index 00000000000..1e63127a389
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.ksm;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.utils.LevelDBStore;
+import org.iq80.leveldb.DBException;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.WriteBatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
+import static org.apache.hadoop.ozone.ksm
+ .KSMConfigKeys.OZONE_KSM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.ksm
+ .KSMConfigKeys.OZONE_KSM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.ozone.ksm
+ .KSMConfigKeys.OZONE_KSM_USER_MAX_VOLUME_DEFAULT;
+import static org.apache.hadoop.ozone.ksm
+ .KSMConfigKeys.OZONE_KSM_USER_MAX_VOLUME;
+import static org.apache.hadoop.ozone.ksm.exceptions
+ .KSMException.ResultCodes;
+
+/**
+ * KSM volume management code.
+ */
+public class VolumeManagerImpl implements VolumeManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(VolumeManagerImpl.class);
+
+  private final KeySpaceManager ksm;
+  private final LevelDBStore store;
+  private final ReadWriteLock lock;
+  private final int maxUserVolumeCount;
+
+  /**
+   * Constructor.
+   * @param ksm - KeySpaceManager, used to report failure metrics.
+   * @param conf - Ozone configuration.
+   * @throws IOException if the LevelDB metadata store cannot be opened.
+   */
+  public VolumeManagerImpl(KeySpaceManager ksm, OzoneConfiguration conf)
+      throws IOException {
+    File metaDir = OzoneUtils.getScmMetadirPath(conf);
+    final int cacheSize = conf.getInt(OZONE_KSM_DB_CACHE_SIZE_MB,
+        OZONE_KSM_DB_CACHE_SIZE_DEFAULT);
+    Options options = new Options();
+    // NOTE(review): assumes OzoneConsts.MB is a long; if it is an int this
+    // product can overflow for large configured cache sizes -- confirm.
+    options.cacheSize(cacheSize * OzoneConsts.MB);
+    File ksmDBFile = new File(metaDir.getPath(), KSM_DB_NAME);
+    this.ksm = ksm;
+    this.store = new LevelDBStore(ksmDBFile, options);
+    this.lock = new ReentrantReadWriteLock();
+    this.maxUserVolumeCount = conf.getInt(OZONE_KSM_USER_MAX_VOLUME,
+        OZONE_KSM_USER_MAX_VOLUME_DEFAULT);
+  }
+
+  @Override
+  public void start() {
+  }
+
+  /**
+   * Stops the manager and closes the underlying LevelDB store.
+   */
+  @Override
+  public void stop() throws IOException {
+    store.close();
+  }
+
+  /**
+   * Creates a volume.
+   *
+   * Writes the volume record and the updated per-user volume list to
+   * LevelDB in one atomic write batch, under the write lock.
+   *
+   * @param args - KsmVolumeArgs.
+   * @throws IOException if the volume already exists, the owner has too
+   *         many volumes, or the DB update fails.
+   */
+  @Override
+  public void createVolume(KsmVolumeArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    lock.writeLock().lock();
+    WriteBatch batch = null;
+    try {
+      // Fix: the batch was previously created between lock() and try,
+      // so a failure there leaked the write lock (and the batch).
+      batch = store.createWriteBatch();
+      byte[] volumeName = store.get(DFSUtil.string2Bytes(args.getVolume()));
+
+      // Check if the volume already exists.
+      if (volumeName != null) {
+        LOG.error("volume:{} already exists", args.getVolume());
+        throw new KSMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
+      }
+
+      // Next count the number of volumes for the user. User keys are
+      // namespaced with a "$" prefix to keep them apart from volume keys.
+      String dbUserName = "$" + args.getOwnerName();
+      byte[] volumeList = store.get(DFSUtil.string2Bytes(dbUserName));
+      List<String> prevVolList;
+      if (volumeList != null) {
+        VolumeList vlist = VolumeList.parseFrom(volumeList);
+        // Fix: getVolumeNamesList() returns an unmodifiable protobuf list;
+        // the original assigned it directly and add() below would have
+        // thrown UnsupportedOperationException. Copy into a mutable list.
+        prevVolList = new LinkedList<>(vlist.getVolumeNamesList());
+      } else {
+        prevVolList = new LinkedList<>();
+      }
+
+      if (prevVolList.size() >= maxUserVolumeCount) {
+        LOG.error("Too many volumes for user:{}", args.getOwnerName());
+        throw new KSMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
+      }
+
+      // Commit the volume information to leveldb.
+      VolumeInfo volumeInfo = args.getProtobuf();
+      batch.put(DFSUtil.string2Bytes(args.getVolume()),
+          volumeInfo.toByteArray());
+
+      prevVolList.add(args.getVolume());
+      VolumeList newVolList = VolumeList.newBuilder()
+          .addAllVolumeNames(prevVolList).build();
+      batch.put(DFSUtil.string2Bytes(dbUserName), newVolList.toByteArray());
+      store.commitWriteBatch(batch);
+      LOG.info("created volume:{} user:{}",
+          args.getVolume(), args.getOwnerName());
+    } catch (IOException | DBException ex) {
+      ksm.getMetrics().incNumVolumeCreateFails();
+      LOG.error("Volume creation failed for user:{} volname:{}",
+          args.getOwnerName(), args.getVolume(), ex);
+      throw ex;
+    } finally {
+      if (batch != null) {
+        store.closeWriteBatch(batch);
+      }
+      lock.writeLock().unlock();
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
new file mode 100644
index 00000000000..1a1b3a941ac
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.ksm.exceptions;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown by KSM.
+ */
+public class KSMException extends IOException {
+  private final KSMException.ResultCodes result;
+
+  /**
+   * Constructs a {@code KSMException} with {@code null}
+   * as its error detail message.
+   *
+   * @param result error code describing why the KSM operation failed.
+   */
+  public KSMException(KSMException.ResultCodes result) {
+    this.result = result;
+  }
+
+  /**
+   * Constructs a {@code KSMException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   * @param result error code describing why the KSM operation failed.
+   */
+  public KSMException(String message, KSMException.ResultCodes result) {
+    super(message);
+    this.result = result;
+  }
+
+  /**
+   * Constructs a {@code KSMException} with the specified detail message
+   * and cause.
+   *
+   * Note that the detail message associated with {@code cause} is
+   * not automatically incorporated into this exception's detail
+   * message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method). (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @param result error code describing why the KSM operation failed.
+   */
+  public KSMException(String message, Throwable cause,
+      KSMException.ResultCodes result) {
+    super(message, cause);
+    this.result = result;
+  }
+
+  /**
+   * Constructs a {@code KSMException} with the specified cause and a
+   * detail message of {@code (cause==null ? null : cause.toString())}
+   * (which typically contains the class and detail message of {@code cause}).
+   * This constructor is useful for wrapping lower-level throwables.
+   *
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method). (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @param result error code describing why the KSM operation failed.
+   */
+  public KSMException(Throwable cause, KSMException.ResultCodes result) {
+    super(cause);
+    this.result = result;
+  }
+
+  /**
+   * Returns the result code attached to this exception.
+   * @return ResultCode
+   */
+  public KSMException.ResultCodes getResult() {
+    return result;
+  }
+
+  /**
+   * Error codes to make it easy to decode these exceptions.
+   */
+  public enum ResultCodes {
+    FAILED_TOO_MANY_USER_VOLUMES,
+    FAILED_VOLUME_ALREADY_EXISTS,
+    FAILED_INTERNAL_ERROR
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
new file mode 100644
index 00000000000..09fd87f22c9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.ksm.exceptions;
+// Exception thrown by KSM.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/KeyspaceManagerProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/KeyspaceManagerProtocolServerSideTranslatorPB.java
index 0725d257077..aa52c17b376 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/KeyspaceManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/KeyspaceManagerProtocolServerSideTranslatorPB.java
@@ -18,9 +18,40 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
+import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ksm.protocol.KeyspaceManagerProtocol;
import org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolPB;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
+import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
+import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .KeySpaceManagerProtocolProtos.Status;
+
+
+import java.io.IOException;
/**
* This class is the server-side translator that forwards requests received on
@@ -42,47 +73,61 @@ public KeyspaceManagerProtocolServerSideTranslatorPB(
}
@Override
- public KeySpaceManagerProtocolProtos.CreateVolumeResponse createVolume(
- RpcController controller, KeySpaceManagerProtocolProtos
- .CreateVolumeRequest
- request) throws ServiceException {
- return null;
+  public CreateVolumeResponse createVolume(
+      RpcController controller, CreateVolumeRequest request)
+      throws ServiceException {
+    // Translate the protobuf request into KsmVolumeArgs, delegate to the
+    // server implementation, and map KSM error codes onto wire statuses.
+    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.createVolume(KsmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
+    } catch (IOException e) {
+      if (e instanceof KSMException) {
+        KSMException ksmException = (KSMException) e;
+        if (ksmException.getResult() ==
+            ResultCodes.FAILED_VOLUME_ALREADY_EXISTS) {
+          resp.setStatus(Status.VOLUME_ALREADY_EXISTS);
+        } else if (ksmException.getResult() ==
+            ResultCodes.FAILED_TOO_MANY_USER_VOLUMES) {
+          resp.setStatus(Status.USER_TOO_MANY_VOLUMES);
+        } else {
+          // Fix: any other KSM result code (e.g. FAILED_INTERNAL_ERROR)
+          // previously fell through and left the status as OK even though
+          // volume creation had failed.
+          resp.setStatus(Status.INTERNAL_ERROR);
+        }
+      } else {
+        resp.setStatus(Status.INTERNAL_ERROR);
+      }
+    }
+    return resp.build();
+  }
@Override
- public KeySpaceManagerProtocolProtos.SetVolumePropertyResponse
- setVolumeProperty(RpcController controller, KeySpaceManagerProtocolProtos
- .SetVolumePropertyRequest request) throws ServiceException {
- return null;
- }
-
- @Override
- public KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse
- checkVolumeAccess(RpcController controller, KeySpaceManagerProtocolProtos
- .CheckVolumeAccessRequest request) throws ServiceException {
- return null;
- }
-
- @Override
- public KeySpaceManagerProtocolProtos.InfoVolumeResponse infoVolume(
- RpcController controller,
- KeySpaceManagerProtocolProtos.InfoVolumeRequest request)
+ public SetVolumePropertyResponse setVolumeProperty(
+ RpcController controller, SetVolumePropertyRequest request)
throws ServiceException {
return null;
}
@Override
- public KeySpaceManagerProtocolProtos.DeleteVolumeResponse deleteVolume(
- RpcController controller, KeySpaceManagerProtocolProtos
- .DeleteVolumeRequest
- request) throws ServiceException {
+ public CheckVolumeAccessResponse checkVolumeAccess(
+ RpcController controller, CheckVolumeAccessRequest request)
+ throws ServiceException {
return null;
}
@Override
- public KeySpaceManagerProtocolProtos.ListVolumeResponse listVolumes(
- RpcController controller,
- KeySpaceManagerProtocolProtos.ListVolumeRequest request)
+ public InfoVolumeResponse infoVolume(
+ RpcController controller, InfoVolumeRequest request)
+ throws ServiceException {
+ return null;
+ }
+
+ @Override
+ public DeleteVolumeResponse deleteVolume(
+ RpcController controller, DeleteVolumeRequest request)
+ throws ServiceException {
+ return null;
+ }
+
+ @Override
+ public ListVolumeResponse listVolumes(
+ RpcController controller, ListVolumeRequest request)
throws ServiceException {
return null;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index f30f2ae0483..e96d3d10cd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -23,6 +23,8 @@
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
+import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
+import org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
@@ -47,7 +49,13 @@
import java.io.IOException;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
-import java.util.*;
+import java.util.Date;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.Locale;
+import java.util.HashSet;
+import java.util.Arrays;
+import java.util.List;
import static org.apache.hadoop.ozone.web.storage.OzoneContainerTranslation.*;
import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.getKey;
@@ -62,7 +70,9 @@ public final class DistributedStorageHandler implements StorageHandler {
LoggerFactory.getLogger(DistributedStorageHandler.class);
private final StorageContainerLocationProtocolClientSideTranslatorPB
- storageContainerLocation;
+ storageContainerLocationClient;
+ private final KeySpaceManagerProtocolClientSideTranslatorPB
+ keySpaceManagerClient;
private final XceiverClientManager xceiverClientManager;
private int chunkSize;
@@ -72,11 +82,15 @@ public final class DistributedStorageHandler implements StorageHandler {
*
* @param conf configuration
* @param storageContainerLocation StorageContainerLocationProtocol proxy
+ * @param keySpaceManagerClient KeySpaceManager proxy
*/
public DistributedStorageHandler(OzoneConfiguration conf,
StorageContainerLocationProtocolClientSideTranslatorPB
- storageContainerLocation) {
- this.storageContainerLocation = storageContainerLocation;
+ storageContainerLocation,
+ KeySpaceManagerProtocolClientSideTranslatorPB
+ keySpaceManagerClient) {
+ this.keySpaceManagerClient = keySpaceManagerClient;
+ this.storageContainerLocationClient = storageContainerLocation;
this.xceiverClientManager = new XceiverClientManager(conf);
chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
@@ -92,21 +106,15 @@ public DistributedStorageHandler(OzoneConfiguration conf,
@Override
public void createVolume(VolumeArgs args) throws IOException, OzoneException {
- String containerKey = buildContainerKey(args.getVolumeName());
- XceiverClientSpi xceiverClient = acquireXceiverClient(containerKey);
- try {
- VolumeInfo volume = new VolumeInfo();
- volume.setVolumeName(args.getVolumeName());
- volume.setQuota(args.getQuota());
- volume.setOwner(new VolumeOwner(args.getUserName()));
- volume.setCreatedOn(dateToString(new Date()));
- volume.setCreatedBy(args.getAdminName());
- KeyData containerKeyData = fromVolumeToContainerKeyData(
- xceiverClient.getPipeline().getContainerName(), containerKey, volume);
- putKey(xceiverClient, containerKeyData, args.getRequestID());
- } finally {
- xceiverClientManager.releaseClient(xceiverClient);
- }
+ long quota = args.getQuota() == null ?
+ Long.MAX_VALUE : args.getQuota().sizeInBytes();
+ KsmVolumeArgs volumeArgs = KsmVolumeArgs.newBuilder()
+ .setAdminName(args.getAdminName())
+ .setOwnerName(args.getUserName())
+ .setVolume(args.getVolumeName())
+ .setQuotaInBytes(quota)
+ .build();
+ keySpaceManagerClient.createVolume(volumeArgs);
}
@Override
@@ -293,9 +301,9 @@ public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
}
/**
- * Acquires an {@link XceiverClientSpi} connected to a {@link Pipeline} of nodes
- * capable of serving container protocol operations. The container is
- * selected based on the specified container key.
+ * Acquires an {@link XceiverClientSpi} connected to a {@link Pipeline}
+ * of nodes capable of serving container protocol operations.
+ * The container is selected based on the specified container key.
*
* @param containerKey container key
* @return XceiverClient connected to a container
@@ -304,7 +312,7 @@ public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
private XceiverClientSpi acquireXceiverClient(String containerKey)
throws IOException {
Set
+ * Ozone is made active by setting OZONE_ENABLED = true and
+ * OZONE_HANDLER_TYPE_KEY = "distributed"
+ *
+ * @throws IOException
+ */
+ @BeforeClass
+ public static void init() throws Exception {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
+ conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+ OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+ cluster = new MiniOzoneCluster.Builder(conf)
+ .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
+ DataNode dataNode = cluster.getDataNodes().get(0);
+ port = dataNode.getInfoPort();
+ }
+
+ /**
+ * Shutdown MiniDFSCluster.
+ */
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Creates Volumes on Ozone Store.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testCreateVolumes() throws IOException {
+ super.testCreateVolumes(port);
+ Assert.assertEquals(cluster.getKeySpaceManager()
+ .getMetrics().getNumVolumeCreates(), 1);
+ Assert.assertEquals(cluster.getKeySpaceManager()
+ .getMetrics().getNumVolumeCreateFails(), 0);
+ }
+
+ /**
+ * Create Volumes with Quota.
+ *
+ * @throws IOException
+ */
+ public void testCreateVolumesWithQuota() throws IOException {
+ super.testCreateVolumesWithQuota(port);
+ }
+
+ /**
+ * Create Volumes with Invalid Quota.
+ *
+ * @throws IOException
+ */
+ public void testCreateVolumesWithInvalidQuota() throws IOException {
+ super.testCreateVolumesWithInvalidQuota(port);
+ }
+
+ /**
+ * To create a volume a user name must be specified using OZONE_USER header.
+ * This test verifies that we get an error in case we call without a OZONE
+ * user name.
+ *
+ * @throws IOException
+ */
+ public void testCreateVolumesWithInvalidUser() throws IOException {
+ super.testCreateVolumesWithInvalidUser(port);
+ }
+
+ /**
+ * Only Admins can create volumes in Ozone. This test uses simple userauth as
+ * backend and hdfs and root are admin users in the simple backend.
+ *
+ * This test tries to create a volume as user bilbo.
+ *
+ * @throws IOException
+ */
+ public void testCreateVolumesWithOutAdminRights() throws IOException {
+ super.testCreateVolumesWithOutAdminRights(port);
+ }
+
+ /**
+ * Create a bunch of volumes in a loop.
+ *
+ * @throws IOException
+ */
+ public void testCreateVolumesInLoop() throws IOException {
+ super.testCreateVolumesInLoop(port);
+ }
+  /**
+   * Get volumes owned by the user.
+   *
+   * @throws IOException
+   */
+  public void testGetVolumesByUser() throws IOException {
+    // Fix: this previously called testGetVolumesByUser(port) on itself,
+    // causing infinite recursion (StackOverflowError). Delegate to the
+    // base-class implementation like every sibling test in this file.
+    super.testGetVolumesByUser(port);
+  }
+
+ /**
+ * Admins can read volumes belonging to other users.
+ *
+ * @throws IOException
+ */
+ public void testGetVolumesOfAnotherUser() throws IOException {
+ super.testGetVolumesOfAnotherUser(port);
+ }
+
+ /**
+ * if you try to read volumes belonging to another user,
+ * then server always ignores it.
+ *
+ * @throws IOException
+ */
+ public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
+ super.testGetVolumesOfAnotherUserShouldFail(port);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
new file mode 100644
index 00000000000..78e6c0f9484
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.web;
+
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.net.URL;
+
+/**
+ * Test ozone volume in the local storage handler scenario.
+ */
+public class TestLocalOzoneVolumes extends TestOzoneHelper {
+ /**
+ * Set the timeout for every test.
+ */
+ @Rule
+ public Timeout testTimeout = new Timeout(300000);
+
+ private static MiniOzoneCluster cluster = null;
+ private static int port = 0;
+
+ /**
+ * Create a MiniDFSCluster for testing.
+ *
+ * Ozone is made active by setting OZONE_ENABLED = true and
+ * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
+ * emulate Ozone backend.
+ *
+ * @throws IOException
+ */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+
+    URL p = conf.getClass().getResource("");
+    String path = p.getPath()
+        .concat(TestLocalOzoneVolumes.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    Logger.getLogger("org.apache.http").setLevel(Level.DEBUG);
+
+    cluster = new MiniOzoneCluster.Builder(conf)
+        .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
+    DataNode dataNode = cluster.getDataNodes().get(0);
+    port = dataNode.getInfoPort();
+  }
+
+ /**
+ * Shutdown MiniDFSCluster.
+ */
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Creates Volumes on Ozone Store.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testCreateVolumes() throws IOException {
+ super.testCreateVolumes(port);
+ }
+
+ /**
+ * Create Volumes with Quota.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testCreateVolumesWithQuota() throws IOException {
+ super.testCreateVolumesWithQuota(port);
+ }
+
+ /**
+ * Create Volumes with Invalid Quota.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testCreateVolumesWithInvalidQuota() throws IOException {
+ super.testCreateVolumesWithInvalidQuota(port);
+ }
+
+ /**
+ * To create a volume a user name must be specified using OZONE_USER header.
+   * This test verifies that we get an error in case we call without an
+   * OZONE user name.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testCreateVolumesWithInvalidUser() throws IOException {
+ super.testCreateVolumesWithInvalidUser(port);
+ }
+
+ /**
+ * Only Admins can create volumes in Ozone. This test uses simple userauth as
+ * backend and hdfs and root are admin users in the simple backend.
+ *
+ * This test tries to create a volume as user bilbo.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testCreateVolumesWithOutAdminRights() throws IOException {
+ super.testCreateVolumesWithOutAdminRights(port);
+ }
+
+ /**
+ * Create a bunch of volumes in a loop.
+ *
+ * @throws IOException
+ */
+ //@Test
+ public void testCreateVolumesInLoop() throws IOException {
+ super.testCreateVolumesInLoop(port);
+ }
+ /**
+ * Get volumes owned by the user.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testGetVolumesByUser() throws IOException {
+ super.testGetVolumesByUser(port);
+ }
+
+ /**
+ * Admins can read volumes belonging to other users.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testGetVolumesOfAnotherUser() throws IOException {
+ super.testGetVolumesOfAnotherUser(port);
+ }
+
+ /**
+   * If you try to read volumes belonging to another user,
+   * then the server always ignores it.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
+ super.testGetVolumesOfAnotherUserShouldFail(port);
+ }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneHelper.java
similarity index 76%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneHelper.java
index c339279a1ff..73955969064 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneHelper.java
@@ -17,31 +17,19 @@
*/
package org.apache.hadoop.ozone.web;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
-import org.apache.http.impl.client.DefaultHttpClient;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
-import java.net.URL;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
@@ -50,63 +38,23 @@
import static java.net.HttpURLConnection.HTTP_OK;
import static org.junit.Assert.assertEquals;
-public class TestOzoneVolumes {
- /**
- * Set the timeout for every test.
- */
- @Rule
- public Timeout testTimeout = new Timeout(300000);
+/**
+ * Helper functions to test Ozone.
+ */
+public class TestOzoneHelper {
- private static MiniOzoneCluster cluster = null;
- private static int port = 0;
-
- /**
- * Create a MiniDFSCluster for testing.
- *
- * Ozone is made active by setting OZONE_ENABLED = true and
- * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
- * emulate Ozone backend.
- *
- * @throws IOException
- */
- @BeforeClass
- public static void init() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
-
- URL p = conf.getClass().getResource("");
- String path = p.getPath().concat(TestOzoneVolumes.class.getSimpleName());
- path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
- OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
-
- conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
- Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-
- cluster = new MiniOzoneCluster.Builder(conf)
- .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
- DataNode dataNode = cluster.getDataNodes().get(0);
- port = dataNode.getInfoPort();
+ public CloseableHttpClient createHttpClient() {
+ return HttpClientBuilder.create().build();
}
-
- /**
- * shutdown MiniDFSCluster
- */
- @AfterClass
- public static void shutdown() {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
-
/**
* Creates Volumes on Ozone Store.
*
* @throws IOException
*/
- @Test
- public void testCreateVolumes() throws IOException {
+ public void testCreateVolumes(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
@@ -125,7 +73,7 @@ public void testCreateVolumes() throws IOException {
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -134,11 +82,10 @@ public void testCreateVolumes() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testCreateVolumesWithQuota() throws IOException {
+ public void testCreateVolumesWithQuota(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
@@ -157,7 +104,7 @@ public void testCreateVolumesWithQuota() throws IOException {
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -166,11 +113,10 @@ public void testCreateVolumesWithQuota() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testCreateVolumesWithInvalidQuota() throws IOException {
+ public void testCreateVolumesWithInvalidQuota(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
@@ -190,7 +136,7 @@ public void testCreateVolumesWithInvalidQuota() throws IOException {
.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -201,11 +147,10 @@ public void testCreateVolumesWithInvalidQuota() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testCreateVolumesWithInvalidUser() throws IOException {
+ public void testCreateVolumesWithInvalidUser(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
@@ -224,7 +169,7 @@ public void testCreateVolumesWithInvalidUser() throws IOException {
assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -236,11 +181,10 @@ public void testCreateVolumesWithInvalidUser() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testCreateVolumesWithOutAdminRights() throws IOException {
+ public void testCreateVolumesWithOutAdminRights(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpPost httppost = new HttpPost(
@@ -259,7 +203,7 @@ public void testCreateVolumesWithOutAdminRights() throws IOException {
assertEquals(response.toString(), ErrorTable.ACCESS_DENIED.getHttpCode(),
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -268,13 +212,12 @@ public void testCreateVolumesWithOutAdminRights() throws IOException {
*
* @throws IOException
*/
- //@Test
- public void testCreateVolumesInLoop() throws IOException {
+ public void testCreateVolumesInLoop(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
for (int x = 0; x < 1000; x++) {
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String volumeName = OzoneUtils.getRequestID().toLowerCase();
String userName = OzoneUtils.getRequestID().toLowerCase();
@@ -293,7 +236,7 @@ public void testCreateVolumesInLoop() throws IOException {
HttpResponse response = client.execute(httppost);
assertEquals(response.toString(), HTTP_CREATED,
response.getStatusLine().getStatusCode());
- client.getConnectionManager().shutdown();
+ client.close();
}
}
/**
@@ -301,13 +244,12 @@ public void testCreateVolumesInLoop() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testGetVolumesByUser() throws IOException {
+ public void testGetVolumesByUser(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
// We need to create a volume for this test to succeed.
- testCreateVolumes();
- HttpClient client = new DefaultHttpClient();
+ testCreateVolumes(port);
+ CloseableHttpClient client = createHttpClient();
try {
HttpGet httpget =
new HttpGet(String.format("http://localhost:%d/", port));
@@ -323,14 +265,14 @@ public void testGetVolumesByUser() throws IOException {
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
httpget.addHeader(Header.OZONE_USER,
- OzoneConsts.OZONE_SIMPLE_HDFS_USER );
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
HttpResponse response = client.execute(httpget);
assertEquals(response.toString(), HTTP_OK,
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -339,12 +281,11 @@ public void testGetVolumesByUser() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testGetVolumesOfAnotherUser() throws IOException {
+ public void testGetVolumesOfAnotherUser(int port) throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
try {
HttpGet httpget =
new HttpGet(String.format("http://localhost:%d/", port));
@@ -366,7 +307,7 @@ public void testGetVolumesOfAnotherUser() throws IOException {
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}
@@ -376,12 +317,12 @@ public void testGetVolumesOfAnotherUser() throws IOException {
*
* @throws IOException
*/
- @Test
- public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
+ public void testGetVolumesOfAnotherUserShouldFail(int port)
+ throws IOException {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
- HttpClient client = new DefaultHttpClient();
+ CloseableHttpClient client = createHttpClient();
String userName = OzoneUtils.getRequestID().toLowerCase();
try {
HttpGet httpget =
@@ -406,7 +347,7 @@ public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
response.getStatusLine().getStatusCode());
} finally {
- client.getConnectionManager().shutdown();
+ client.close();
}
}