HDFS-11770. Ozone: KSM: Add setVolumeProperty. Contributed by Mukul Kumar Singh.
commit 3ff857f63e
parent e0704c0593
@@ -60,14 +60,14 @@ public interface KeySpaceManagerProtocol {

   /**
    * Gets the volume information.
-   * @param volume - Volume name.s
+   * @param volume - Volume name.
    * @return VolumeArgs or exception is thrown.
    * @throws IOException
    */
   KsmVolumeArgs getVolumeInfo(String volume) throws IOException;

   /**
-   * Deletes the an exisiting empty volume.
+   * Deletes an existing empty volume.
    * @param volume - Name of the volume.
    * @throws IOException
    */
@@ -35,6 +35,14 @@ import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
 import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
 import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocol.proto
@@ -108,7 +116,8 @@ public final class KeySpaceManagerProtocolClientSideTranslatorPB
     }

     if (resp.getStatus() != Status.OK) {
-      throw new IOException("Volume creation failed error" + resp.getStatus());
+      throw new
+          IOException("Volume creation failed, error:" + resp.getStatus());
     }
   }

@@ -121,7 +130,19 @@ public final class KeySpaceManagerProtocolClientSideTranslatorPB
    */
   @Override
   public void setOwner(String volume, String owner) throws IOException {
+    SetVolumePropertyRequest.Builder req =
+        SetVolumePropertyRequest.newBuilder();
+    req.setVolumeName(volume).setOwnerName(owner);
+    final SetVolumePropertyResponse resp;
+    try {
+      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume owner change failed, error:" + resp.getStatus());
+    }
   }

   /**
@@ -133,7 +154,19 @@ public final class KeySpaceManagerProtocolClientSideTranslatorPB
    */
   @Override
   public void setQuota(String volume, long quota) throws IOException {
+    SetVolumePropertyRequest.Builder req =
+        SetVolumePropertyRequest.newBuilder();
+    req.setVolumeName(volume).setQuotaInBytes(quota);
+    final SetVolumePropertyResponse resp;
+    try {
+      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume quota change failed, error:" + resp.getStatus());
+    }
   }

   /**
@@ -152,17 +185,29 @@ public final class KeySpaceManagerProtocolClientSideTranslatorPB
   /**
    * Gets the volume information.
    *
-   * @param volume - Volume name.s
+   * @param volume - Volume name.
    * @return KsmVolumeArgs or exception is thrown.
    * @throws IOException
    */
   @Override
   public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    return null;
+    InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
+    req.setVolumeName(volume);
+    final InfoVolumeResponse resp;
+    try {
+      resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Info Volume failed, error:" + resp.getStatus());
+    }
+    return KsmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
   }

   /**
-   * Deletes the an exisiting empty volume.
+   * Deletes an existing empty volume.
    *
    * @param volume - Name of the volume.
    * @throws IOException
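For context, a minimal sketch of how the new client-side calls above might be used, assuming a constructed KeySpaceManagerProtocolClientSideTranslatorPB instance named ksmClient and an existing volume "volumeOne" (both names are hypothetical, not part of this change):

    // Change the owner, lower the quota, then read the stored record back.
    ksmClient.setOwner("volumeOne", "bilbo");
    ksmClient.setQuota("volumeOne", 10L * OzoneConsts.GB);
    KsmVolumeArgs volume = ksmClient.getVolumeInfo("volumeOne");
    // Each call throws IOException when the KSM returns a non-OK status.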
@@ -69,6 +69,7 @@ public final class ObjectStoreHandler implements Closeable {
       keySpaceManagerClient;
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
+  private final StorageHandler storageHandler;

   /**
    * Creates a new ObjectStoreHandler.
@@ -83,7 +84,6 @@ public final class ObjectStoreHandler implements Closeable {
         OZONE_HANDLER_TYPE_KEY, shType);
     boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY,
         OZONE_TRACE_ENABLED_DEFAULT);
-    final StorageHandler storageHandler;

     // Initialize Jersey container for object store web application.
     if (OzoneConsts.OZONE_HANDLER_DISTRIBUTED.equalsIgnoreCase(shType)) {
@@ -147,6 +147,15 @@ public final class ObjectStoreHandler implements Closeable {
     return this.objectStoreJerseyContainer;
   }

+  /**
+   * Returns the storage handler.
+   *
+   * @return returns the storage handler
+   */
+  public StorageHandler getStorageHandler() {
+    return this.storageHandler;
+  }
+
   @Override
   public void close() {
     LOG.info("Closing ObjectStoreHandler.");
@@ -94,9 +94,17 @@ public final class OzoneConsts {
   public static final String OZONE_HANDLER_LOCAL = "local";

   /**
-   * Ozone metadata key delimiter.
+   * KSM LevelDB prefixes.
    */
-  public static final String DB_KEY_DELIMITER = "/";
+  public static final String KSM_VOLUME_PREFIX = "/";
+  public static final String KSM_BUCKET_PREFIX = KSM_VOLUME_PREFIX;
+  public static final String KSM_KEY_PREFIX = KSM_VOLUME_PREFIX;
+  public static final String KSM_USER_PREFIX = "$";
+
+  /**
+   * Max KSM Quota size of 1024 PB.
+   */
+  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;

   private OzoneConsts() {
     // Never Constructed
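For context, an illustrative sketch of the LevelDB key layout these prefixes produce, assuming a volume "vol1" owned by user "alice" containing bucket "b1" (hypothetical names; the composition follows getVolumeKey, getUserKey and getBucketKey added later in this change):

    // volume record key : "/vol1"      -> KSM_VOLUME_PREFIX + volume
    // user record key   : "$alice"     -> KSM_USER_PREFIX + user
    // bucket record key : "/vol1/b1"   -> KSM_VOLUME_PREFIX + volume + KSM_BUCKET_PREFIX + bucket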
@@ -17,15 +17,12 @@
 package org.apache.hadoop.ozone.ksm;

 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
 import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
 import org.iq80.leveldb.DBException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import static org.apache.hadoop.ozone.OzoneConsts.DB_KEY_DELIMITER;
-
 /**
  * KSM bucket manager.
  */
@@ -73,35 +70,31 @@ public class BucketManagerImpl implements BucketManager {
   public void createBucket(KsmBucketArgs args) throws KSMException {
     Preconditions.checkNotNull(args);
     metadataManager.writeLock().lock();
-    String volumeNameString = args.getVolumeName();
-    String bucketNameString = args.getBucketName();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
     try {
       //bucket key: {volume/bucket}
-      String bucketKeyString = volumeNameString +
-          DB_KEY_DELIMITER + bucketNameString;
-
-      byte[] volumeName = DFSUtil.string2Bytes(volumeNameString);
-      byte[] bucketKey = DFSUtil.string2Bytes(bucketKeyString);
+      byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);

       //Check if the volume exists
-      if(metadataManager.get(volumeName) == null) {
-        LOG.error("volume: {} not found ", volumeNameString);
+      if(metadataManager.get(volumeKey) == null) {
+        LOG.error("volume: {} not found ", volumeName);
         throw new KSMException("Volume doesn't exist",
             KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
       }
       //Check if bucket already exists
       if(metadataManager.get(bucketKey) != null) {
-        LOG.error("bucket: {} already exists ", bucketNameString);
+        LOG.error("bucket: {} already exists ", bucketName);
         throw new KSMException("Bucket already exist",
             KSMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS);
       }
       metadataManager.put(bucketKey, args.getProtobuf().toByteArray());

-      LOG.info("created bucket: {} in volume: {}", bucketNameString,
-          volumeNameString);
+      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
     } catch (DBException ex) {
       LOG.error("Bucket creation failed for bucket:{} in volume:{}",
-          volumeNameString, bucketNameString, ex);
+          bucketName, volumeName, ex);
       throw new KSMException(ex.getMessage(),
           KSMException.ResultCodes.FAILED_INTERNAL_ERROR);
     } finally {
@@ -29,10 +29,14 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 public class KSMMetrics {
   // KSM op metrics
   private @Metric MutableCounterLong numVolumeCreates;
+  private @Metric MutableCounterLong numVolumeModifies;
+  private @Metric MutableCounterLong numVolumeInfos;
   private @Metric MutableCounterLong numBucketCreates;

   // Failure Metrics
   private @Metric MutableCounterLong numVolumeCreateFails;
+  private @Metric MutableCounterLong numVolumeModifyFails;
+  private @Metric MutableCounterLong numVolumeInfoFails;
   private @Metric MutableCounterLong numBucketCreateFails;

   public KSMMetrics() {
@@ -49,6 +53,14 @@ public class KSMMetrics {
     numVolumeCreates.incr();
   }

+  public void incNumVolumeModifies() {
+    numVolumeModifies.incr();
+  }
+
+  public void incNumVolumeInfos() {
+    numVolumeInfos.incr();
+  }
+
   public void incNumBucketCreates() {
     numBucketCreates.incr();
   }
@@ -57,6 +69,14 @@ public class KSMMetrics {
     numVolumeCreates.incr();
   }

+  public void incNumVolumeModifyFails() {
+    numVolumeModifyFails.incr();
+  }
+
+  public void incNumVolumeInfoFails() {
+    numVolumeInfoFails.incr();
+  }
+
   public void incNumBucketCreateFails() {
     numBucketCreateFails.incr();
   }
@@ -66,6 +86,16 @@ public class KSMMetrics {
     return numVolumeCreates.value();
   }

+  @VisibleForTesting
+  public long getNumVolumeModifies() {
+    return numVolumeModifies.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeInfos() {
+    return numVolumeInfos.value();
+  }
+
   @VisibleForTesting
   public long getNumBucketCreates() {
     return numBucketCreates.value();
@@ -76,6 +106,16 @@ public class KSMMetrics {
     return numVolumeCreateFails.value();
   }

+  @VisibleForTesting
+  public long getNumVolumeModifyFails() {
+    return numVolumeModifyFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeInfoFails() {
+    return numVolumeInfoFails.value();
+  }
+
   @VisibleForTesting
   public long getNumBucketCreateFails() {
     return numBucketCreateFails.value();
@@ -17,7 +17,6 @@

 package org.apache.hadoop.ozone.ksm;

-import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -121,15 +120,6 @@ public class KeySpaceManager implements KeySpaceManagerProtocol {
     return metrics;
   }

-  /**
-   * Returns listening address of Key Space Manager RPC server.
-   *
-   * @return listen address of Key Space Manager RPC server
-   */
-  @VisibleForTesting
-  public InetSocketAddress getClientRpcAddress() {
-    return ksmRpcAddress;
-  }
   /**
    * Main entry point for starting KeySpaceManager.
    *
@@ -244,7 +234,13 @@ public class KeySpaceManager implements KeySpaceManagerProtocol {
    */
   @Override
   public void setOwner(String volume, String owner) throws IOException {
+    try {
+      metrics.incNumVolumeModifies();
+      volumeManager.setOwner(volume, owner);
+    } catch (Exception ex) {
+      metrics.incNumVolumeModifyFails();
+      throw ex;
+    }
   }

   /**
@@ -256,7 +252,13 @@ public class KeySpaceManager implements KeySpaceManagerProtocol {
    */
   @Override
   public void setQuota(String volume, long quota) throws IOException {
+    try {
+      metrics.incNumVolumeModifies();
+      volumeManager.setQuota(volume, quota);
+    } catch (Exception ex) {
+      metrics.incNumVolumeModifyFails();
+      throw ex;
+    }
   }

   /**
@@ -275,17 +277,23 @@ public class KeySpaceManager implements KeySpaceManagerProtocol {
   /**
    * Gets the volume information.
    *
-   * @param volume - Volume name.s
+   * @param volume - Volume name.
    * @return VolumeArgs or exception is thrown.
    * @throws IOException
    */
   @Override
   public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    return null;
+    try {
+      metrics.incNumVolumeInfos();
+      return volumeManager.getVolumeInfo(volume);
+    } catch (Exception ex) {
+      metrics.incNumVolumeInfoFails();
+      throw ex;
+    }
   }

   /**
-   * Deletes the an exisiting empty volume.
+   * Deletes an existing empty volume.
    *
    * @param volume - Name of the volume.
    * @throws IOException
@@ -61,11 +61,38 @@ public interface MetadataManager {
    */
   void put(byte[] key, byte[] value);

+  /**
+   * Performs batch Put and Delete to Metadata DB.
+   * Can be used to do multiple puts and deletes atomically.
+   * @param putList - list of Key/Value to put into DB
+   * @param delList - list of Key to delete from DB
+   */
+  void batchPutDelete(List<Map.Entry<byte[], byte[]>> putList,
+      List<byte[]> delList) throws IOException;
+
   /**
    * Performs a batch Put to Metadata DB.
    * Can be used to do multiple puts atomically.
-   * @param list - list of Map.Entry
+   * @param putList - list of Key/Value to put into DB
    */
-  void batchPut(List<Map.Entry<byte[], byte[]>> list) throws IOException;
+  void batchPut(List<Map.Entry<byte[], byte[]>> putList) throws IOException;
+
+  /**
+   * Given a volume return the corresponding DB key.
+   * @param volume - Volume name
+   */
+  byte[] getVolumeKey(String volume);
+
+  /**
+   * Given a user return the corresponding DB key.
+   * @param user - User name
+   */
+  byte[] getUserKey(String user);
+
+  /**
+   * Given a volume and bucket, return the corresponding DB key.
+   * @param volume - User name
+   * @param bucket - Bucket name
+   */
+  byte[] getBucketKey(String volume, String bucket);
 }
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.ksm;

+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -75,6 +76,35 @@ public class MetadataManagerImpl implements MetadataManager {
     }
   }

+  /**
+   * Given a volume return the corresponding DB key.
+   * @param volume - Volume name
+   */
+  public byte[] getVolumeKey(String volume) {
+    String dbVolumeName = OzoneConsts.KSM_VOLUME_PREFIX + volume;
+    return DFSUtil.string2Bytes(dbVolumeName);
+  }
+
+  /**
+   * Given a user return the corresponding DB key.
+   * @param user - User name
+   */
+  public byte[] getUserKey(String user) {
+    String dbUserName = OzoneConsts.KSM_USER_PREFIX + user;
+    return DFSUtil.string2Bytes(dbUserName);
+  }
+
+  /**
+   * Given a volume and bucket, return the corresponding DB key.
+   * @param volume - User name
+   * @param bucket - Bucket name
+   */
+  public byte[] getBucketKey(String volume, String bucket) {
+    String bucketKeyString = OzoneConsts.KSM_VOLUME_PREFIX + volume
+        + OzoneConsts.KSM_BUCKET_PREFIX + bucket;
+    return DFSUtil.string2Bytes(bucketKeyString);
+  }
+
   /**
    * Returns the read lock used on Metadata DB.
    * @return readLock
@@ -113,6 +143,26 @@ public class MetadataManagerImpl implements MetadataManager {
     store.put(key, value);
   }

+  /**
+   * Performs a batch Put and Delete from Metadata DB.
+   * Can be used to do multiple puts and deletes atomically.
+   * @param putList - list of key and value pairs to put to Metadata DB.
+   * @param delList - list of keys to delete from Metadata DB.
+   */
+  @Override
+  public void batchPutDelete(List<Map.Entry<byte[], byte[]>> putList,
+                             List<byte[]> delList)
+      throws IOException {
+    WriteBatch batch = store.createWriteBatch();
+    putList.forEach(entry -> batch.put(entry.getKey(), entry.getValue()));
+    delList.forEach(entry -> batch.delete(entry));
+    try {
+      store.commitWriteBatch(batch);
+    } finally {
+      store.closeWriteBatch(batch);
+    }
+  }
+
   /**
    * Performs a batch Put to Metadata DB.
    * Can be used to do multiple puts atomically.
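For context, a minimal sketch of how a caller might use the new batchPutDelete to replace one record with another atomically, assuming the keys and values (newUserKey, newVolumeList, oldUserKey) are already prepared byte arrays (hypothetical placeholders, not part of this change):

    List<Map.Entry<byte[], byte[]>> putList = new LinkedList<>();
    List<byte[]> delList = new LinkedList<>();
    // Stage the new record and the key to drop; both are applied in one LevelDB WriteBatch.
    putList.add(new AbstractMap.SimpleEntry<>(newUserKey, newVolumeList));
    delList.add(oldUserKey);
    metadataManager.batchPutDelete(putList, delList);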
@@ -30,4 +30,30 @@ public interface VolumeManager {
    * @param args - Volume args to create a volume
    */
   void createVolume(KsmVolumeArgs args) throws IOException;
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  void setOwner(String volume, String owner) throws IOException;
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  void setQuota(String volume, long quota) throws IOException;
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  KsmVolumeArgs getVolumeInfo(String volume) throws IOException;
 }
@@ -17,7 +17,6 @@
 package org.apache.hadoop.ozone.ksm;

 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
@@ -64,6 +63,62 @@ public class VolumeManagerImpl implements VolumeManager {
         OZONE_KSM_USER_MAX_VOLUME_DEFAULT);
   }

+  // Helpers to add and delete volume from user list
+  private void addVolumeToOwnerList(String volume, String owner,
+      List<Map.Entry<byte[], byte[]>> putBatch)
+      throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    }
+
+    // Check the volume count
+    if (prevVolList.size() >= maxUserVolumeCount) {
+      LOG.error("Too many volumes for user:{}", owner);
+      throw new KSMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
+    }
+
+    // Add the new volume to the list
+    prevVolList.add(volume);
+    VolumeList newVolList = VolumeList.newBuilder()
+        .addAllVolumeNames(prevVolList).build();
+    putBatch.add(batchEntry(dbUserKey, newVolList.toByteArray()));
+  }
+
+  private void delVolumeFromOwnerList(String volume, String owner,
+      List<Map.Entry<byte[], byte[]>> putBatch,
+      List<byte[]> deleteBatch)
+      throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    } else {
+      throw new KSMException(ResultCodes.FAILED_USER_NOT_FOUND);
+    }
+
+    // Remove the volume from the list
+    prevVolList.remove(volume);
+    if (prevVolList.size() == 0) {
+      deleteBatch.add(dbUserKey);
+    } else {
+      VolumeList newVolList = VolumeList.newBuilder()
+          .addAllVolumeNames(prevVolList).build();
+      putBatch.add(batchEntry(dbUserKey, newVolList.toByteArray()));
+    }
+  }
+
+  private Map.Entry<byte[], byte[]> batchEntry(byte[] key, byte[] value) {
+    return new AbstractMap.SimpleEntry<>(key, value);
+  }
+
   /**
    * Creates a volume.
    * @param args - KsmVolumeArgs.
@@ -74,42 +129,21 @@ public class VolumeManagerImpl implements VolumeManager {
     metadataManager.writeLock().lock();
     List<Map.Entry<byte[], byte[]>> batch = new LinkedList<>();
     try {
-      byte[] volumeName = metadataManager.
-          get(DFSUtil.string2Bytes(args.getVolume()));
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(args.getVolume());
+      byte[] volumeInfo = metadataManager.get(dbVolumeKey);

       // Check of the volume already exists
-      if(volumeName != null) {
+      if (volumeInfo != null) {
         LOG.error("volume:{} already exists", args.getVolume());
         throw new KSMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
       }

-      // Next count the number of volumes for the user
-      String dbUserName = "$" + args.getOwnerName();
-      byte[] volumeList = metadataManager
-          .get(DFSUtil.string2Bytes(dbUserName));
-      List prevVolList;
-      if (volumeList != null) {
-        VolumeList vlist = VolumeList.parseFrom(volumeList);
-        prevVolList = vlist.getVolumeNamesList();
-      } else {
-        prevVolList = new LinkedList();
-      }
+      // Write the vol info
+      VolumeInfo newVolumeInfo = args.getProtobuf();
+      batch.add(batchEntry(dbVolumeKey, newVolumeInfo.toByteArray()));

-      if (prevVolList.size() >= maxUserVolumeCount) {
-        LOG.error("Too many volumes for user:{}", args.getOwnerName());
-        throw new KSMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
-      }
-
-      // Commit the volume information to metadataManager
-      VolumeInfo volumeInfo = args.getProtobuf();
-      batch.add(new AbstractMap.SimpleEntry<>(
-          DFSUtil.string2Bytes(args.getVolume()), volumeInfo.toByteArray()));
-
-      prevVolList.add(args.getVolume());
-      VolumeList newVolList = VolumeList.newBuilder()
-          .addAllVolumeNames(prevVolList).build();
-      batch.add(new AbstractMap.SimpleEntry<>(
-          DFSUtil.string2Bytes(dbUserName), newVolList.toByteArray()));
+      // Add volume to user list
+      addVolumeToOwnerList(args.getVolume(), args.getOwnerName(), batch);
       metadataManager.batchPut(batch);
       LOG.info("created volume:{} user:{}",
           args.getVolume(), args.getOwnerName());
@@ -121,4 +155,120 @@ public class VolumeManagerImpl implements VolumeManager {
       metadataManager.writeLock().unlock();
     }
   }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(owner);
+    List<Map.Entry<byte[], byte[]>> putbatch = new LinkedList<>();
+    List<byte[]> deletebatch = new LinkedList<>();
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equalsIgnoreCase(volumeInfo.getVolume()));
+
+      delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(),
+          putbatch, deletebatch);
+      addVolumeToOwnerList(volume, owner, putbatch);
+
+      KsmVolumeArgs newVolumeArgs =
+          KsmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(owner)
+              .setQuotaInBytes(volumeArgs.getQuotaInBytes())
+              .build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      putbatch.add(batchEntry(dbVolumeKey, newVolumeInfo.toByteArray()));
+
+      metadataManager.batchPutDelete(putbatch, deletebatch);
+    } catch (IOException ex) {
+      LOG.error("Changing volume ownership failed for user:{} volume:{}",
+          owner, volume, ex);
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  public void setQuota(String volume, long quota) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equalsIgnoreCase(volumeInfo.getVolume()));
+
+      KsmVolumeArgs newVolumeArgs =
+          KsmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(volumeArgs.getOwnerName())
+              .setQuotaInBytes(quota)
+              .build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray());
+    } catch (IOException ex) {
+      LOG.error("Changing volume quota failed for volume:{} quota:{}",
+          volume, quota, ex);
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equalsIgnoreCase(volumeInfo.getVolume()));
+      return volumeArgs;
+    } catch (IOException ex) {
+      LOG.error("Info volume failed for volume:{}", volume, ex);
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
 }
@@ -99,6 +99,7 @@ public class KSMException extends IOException {
     FAILED_TOO_MANY_USER_VOLUMES,
     FAILED_VOLUME_ALREADY_EXISTS,
     FAILED_VOLUME_NOT_FOUND,
+    FAILED_USER_NOT_FOUND,
     FAILED_BUCKET_ALREADY_EXISTS,
     FAILED_INTERNAL_ERROR
   }
@@ -23,7 +23,6 @@ import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
 import org.apache.hadoop.ksm.protocol.KeySpaceManagerProtocol;
 import org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolPB;
 import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
 import org.apache.hadoop.ozone.protocol.proto
     .KeySpaceManagerProtocolProtos.CreateBucketRequest;
 import org.apache.hadoop.ozone.protocol.proto
@@ -77,6 +76,29 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements
     this.impl = impl;
   }

+  // Convert and exception to corresponding status code
+  private Status exceptionToResponseStatus(IOException ex) {
+    if (ex instanceof KSMException) {
+      KSMException ksmException = (KSMException)ex;
+      switch (ksmException.getResult()) {
+      case FAILED_VOLUME_ALREADY_EXISTS:
+        return Status.VOLUME_ALREADY_EXISTS;
+      case FAILED_TOO_MANY_USER_VOLUMES:
+        return Status.USER_TOO_MANY_VOLUMES;
+      case FAILED_VOLUME_NOT_FOUND:
+        return Status.VOLUME_NOT_FOUND;
+      case FAILED_USER_NOT_FOUND:
+        return Status.USER_NOT_FOUND;
+      case FAILED_BUCKET_ALREADY_EXISTS:
+        return Status.BUCKET_ALREADY_EXISTS;
+      default:
+        return Status.INTERNAL_ERROR;
+      }
+    } else {
+      return Status.INTERNAL_ERROR;
+    }
+  }
+
   @Override
   public CreateVolumeResponse createVolume(
       RpcController controller, CreateVolumeRequest request)
@@ -86,18 +108,7 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements
     try {
       impl.createVolume(KsmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
     } catch (IOException e) {
-      if (e instanceof KSMException) {
-        KSMException ksmException = (KSMException)e;
-        if (ksmException.getResult() ==
-            ResultCodes.FAILED_VOLUME_ALREADY_EXISTS) {
-          resp.setStatus(Status.VOLUME_ALREADY_EXISTS);
-        } else if (ksmException.getResult() ==
-            ResultCodes.FAILED_TOO_MANY_USER_VOLUMES) {
-          resp.setStatus(Status.USER_TOO_MANY_VOLUMES);
-        }
-      } else {
-        resp.setStatus(Status.INTERNAL_ERROR);
-      }
+      resp.setStatus(exceptionToResponseStatus(e));
     }
     return resp.build();
   }
@@ -106,7 +117,23 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements
   public SetVolumePropertyResponse setVolumeProperty(
       RpcController controller, SetVolumePropertyRequest request)
       throws ServiceException {
-    return null;
+    SetVolumePropertyResponse.Builder resp =
+        SetVolumePropertyResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+
+    try {
+      if (request.hasQuotaInBytes()) {
+        long quota = request.getQuotaInBytes();
+        impl.setQuota(volume, quota);
+      } else {
+        String owner = request.getOwnerName();
+        impl.setOwner(volume, owner);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
   }

   @Override
@@ -120,7 +147,16 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements
   public InfoVolumeResponse infoVolume(
       RpcController controller, InfoVolumeRequest request)
       throws ServiceException {
-    return null;
+    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+    try {
+      KsmVolumeArgs ret = impl.getVolumeInfo(volume);
+      resp.setVolumeInfo(ret.getProtobuf());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
   }

   @Override
@@ -147,16 +183,8 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements
       impl.createBucket(KsmBucketArgs.getFromProtobuf(
           request.getBucketInfo()));
       resp.setStatus(Status.OK);
-    } catch (KSMException ksmEx) {
-      if (ksmEx.getResult() ==
-          ResultCodes.FAILED_VOLUME_NOT_FOUND) {
-        resp.setStatus(Status.VOLUME_NOT_FOUND);
-      } else if (ksmEx.getResult() ==
-          ResultCodes.FAILED_BUCKET_ALREADY_EXISTS) {
-        resp.setStatus(Status.BUCKET_ALREADY_EXISTS);
-      }
-    } catch(IOException ex) {
-      resp.setStatus(Status.INTERNAL_ERROR);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
     }
     return resp.build();
   }
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.web.request;


 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.headers.Header;
 import org.codehaus.jackson.annotate.JsonIgnore;

@@ -29,10 +30,6 @@ import org.codehaus.jackson.annotate.JsonIgnore;
  */
 @InterfaceAudience.Private
 public class OzoneQuota {
-  private static final long MB_IN_BYTES = 1048576L;
-  private static final long GB_IN_BYTES = 1073741824L;
-  private static final long TB_IN_BYTES = 1099511627776L;
-

   private Units unit;
   private int size;
@@ -179,14 +176,40 @@ public class OzoneQuota {
     case BYTES:
       return this.getSize();
     case MB:
-      return this.getSize() * MB_IN_BYTES;
+      return this.getSize() * OzoneConsts.MB;
     case GB:
-      return this.getSize() * GB_IN_BYTES;
+      return this.getSize() * OzoneConsts.GB;
     case TB:
-      return this.getSize() * TB_IN_BYTES;
+      return this.getSize() * OzoneConsts.TB;
     case UNDEFINED:
     default:
       return -1;
     }
   }
+
+  /**
+   * Returns OzoneQuota corresponding to size in bytes.
+   *
+   * @param sizeInBytes size in bytes to be converted
+   *
+   * @return OzoneQuota object
+   */
+  public static OzoneQuota getOzoneQuota(long sizeInBytes) {
+    long size;
+    Units unit;
+    if (sizeInBytes % OzoneConsts.TB == 0) {
+      size = sizeInBytes / OzoneConsts.TB;
+      unit = Units.TB;
+    } else if (sizeInBytes % OzoneConsts.GB == 0) {
+      size = sizeInBytes / OzoneConsts.GB;
+      unit = Units.GB;
+    } else if (sizeInBytes % OzoneConsts.MB == 0) {
+      size = sizeInBytes / OzoneConsts.MB;
+      unit = Units.MB;
+    } else {
+      size = sizeInBytes;
+      unit = Units.BYTES;
+    }
+    return new OzoneQuota((int)size, unit);
+  }
 }
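For context, an illustrative round trip through the new conversion helper (values chosen purely for illustration):

    // 5 GB expressed in bytes maps back to an OzoneQuota of (5, GB) ...
    OzoneQuota quota = OzoneQuota.getOzoneQuota(5L * OzoneConsts.GB);
    // ... and converting it forward again yields the same byte count.
    long bytes = quota.sizeInBytes();  // == 5L * OzoneConsts.GB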
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.OzoneConsts.Versioning;
 import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
+import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.XceiverClientManager;
@@ -115,7 +116,7 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void createVolume(VolumeArgs args) throws IOException, OzoneException {
     long quota = args.getQuota() == null ?
-        Long.MAX_VALUE : args.getQuota().sizeInBytes();
+        OzoneConsts.MAX_QUOTA_IN_BYTES : args.getQuota().sizeInBytes();
     KsmVolumeArgs volumeArgs = KsmVolumeArgs.newBuilder()
         .setAdminName(args.getAdminName())
         .setOwnerName(args.getUserName())
@@ -128,13 +129,15 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void setVolumeOwner(VolumeArgs args) throws
       IOException, OzoneException {
-    throw new UnsupportedOperationException("setVolumeOwner not implemented");
+    keySpaceManagerClient.setOwner(args.getVolumeName(), args.getUserName());
   }

   @Override
   public void setVolumeQuota(VolumeArgs args, boolean remove)
       throws IOException, OzoneException {
-    throw new UnsupportedOperationException("setVolumeQuota not implemented");
+    long quota = remove ? OzoneConsts.MAX_QUOTA_IN_BYTES :
+        args.getQuota().sizeInBytes();
+    keySpaceManagerClient.setQuota(args.getVolumeName(), quota);
   }

   @Override
@@ -158,18 +161,15 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public VolumeInfo getVolumeInfo(VolumeArgs args)
       throws IOException, OzoneException {
-    String containerKey = buildContainerKey(args.getVolumeName());
-    XceiverClientSpi xceiverClient = acquireXceiverClient(containerKey);
-    try {
-      KeyData containerKeyData = containerKeyDataForRead(
-          xceiverClient.getPipeline().getContainerName(), containerKey);
-      GetKeyResponseProto response = getKey(xceiverClient, containerKeyData,
-          args.getRequestID());
-      return fromContainerKeyValueListToVolume(
-          response.getKeyData().getMetadataList());
-    } finally {
-      xceiverClientManager.releaseClient(xceiverClient);
-    }
+    KsmVolumeArgs volumeArgs =
+        keySpaceManagerClient.getVolumeInfo(args.getVolumeName());
+    //TODO: add support for createdOn and other fields in getVolumeInfo
+    VolumeInfo volInfo =
+        new VolumeInfo(volumeArgs.getVolume(), null,
+            volumeArgs.getAdminName());
+    volInfo.setOwner(new VolumeOwner(volumeArgs.getOwnerName()));
+    volInfo.setQuota(OzoneQuota.getOzoneQuota(volumeArgs.getQuotaInBytes()));
+    return volInfo;
   }

   @Override
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.ksm;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
 import org.apache.hadoop.ozone.ksm.exceptions
     .KSMException.ResultCodes;
@@ -56,6 +57,19 @@ public class TestBucketManagerImpl {

     Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
     Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
+    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            DFSUtil.string2Bytes(
+                OzoneConsts.KSM_VOLUME_PREFIX + invocation.getArguments()[0]));
+    Mockito.when(metadataManager
+        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
+            (InvocationOnMock invocation) ->
+                DFSUtil.string2Bytes(
+                    OzoneConsts.KSM_VOLUME_PREFIX
+                        + invocation.getArguments()[0]
+                        + OzoneConsts.KSM_BUCKET_PREFIX
+                        + invocation.getArguments()[1]));
+
     Mockito.doAnswer(
         new Answer<Void>() {
           @Override
@@ -74,7 +88,8 @@ public class TestBucketManagerImpl {
     );
     for(String volumeName : volumesToCreate) {
       byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
-      metadataDB.put(volumeName, dummyVolumeInfo);
+      metadataDB.put(OzoneConsts.KSM_VOLUME_PREFIX + volumeName,
+          dummyVolumeInfo);
     }
     return metadataManager;
   }
@@ -112,7 +127,7 @@ public class TestBucketManagerImpl {
     bucketManager.createBucket(bucketArgs);
     //TODO: Use BucketManagerImpl#getBucketInfo to verify creation of bucket.
     Assert.assertNotNull(metaMgr
-        .get(DFSUtil.string2Bytes("sampleVol/bucketOne")));
+        .get(DFSUtil.string2Bytes("/sampleVol/bucketOne")));
   }

   @Test
@ -0,0 +1,166 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
* contributor license agreements. See the NOTICE file distributed with this
|
||||||
|
* work for additional information regarding copyright ownership. The ASF
|
||||||
|
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
* <p>
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* <p>
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
* License for the specific language governing permissions and limitations under
|
||||||
|
* the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.ksm;
|
||||||
|
|
||||||
|
|
||||||
|
import org.apache.commons.lang.RandomStringUtils;
|
||||||
|
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
|
||||||
|
import org.apache.hadoop.ozone.MiniOzoneCluster;
|
||||||
|
import org.apache.hadoop.ozone.OzoneConfigKeys;
|
||||||
|
import org.apache.hadoop.ozone.OzoneConfiguration;
|
||||||
|
import org.apache.hadoop.ozone.OzoneConsts;
|
||||||
|
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
|
||||||
|
import org.apache.hadoop.ozone.web.handlers.UserArgs;
|
||||||
|
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
|
||||||
|
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
|
||||||
|
import org.apache.hadoop.ozone.web.request.OzoneQuota;
|
||||||
|
import org.apache.hadoop.ozone.web.response.VolumeInfo;
|
||||||
|
import org.junit.AfterClass;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Random;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test Key Space Manager operation in distributed handler scenario.
|
||||||
|
*/
|
||||||
|
public class TestKeySpaceManager {
|
||||||
|
private static MiniOzoneCluster cluster = null;
|
||||||
|
private static StorageHandler storageHandler;
|
||||||
|
private static UserArgs volUserArgs;
|
||||||
|
private static KSMMetrics ksmMetrics;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a MiniDFSCluster for testing.
|
||||||
|
* <p>
|
||||||
|
* Ozone is made active by setting OZONE_ENABLED = true and
|
||||||
|
* OZONE_HANDLER_TYPE_KEY = "distributed"
|
||||||
|
*
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
@BeforeClass
|
||||||
|
public static void init() throws Exception {
|
||||||
|
OzoneConfiguration conf = new OzoneConfiguration();
|
||||||
|
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
|
||||||
|
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
|
||||||
|
cluster = new MiniOzoneCluster.Builder(conf)
|
||||||
|
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
|
||||||
|
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
|
||||||
|
volUserArgs = new UserArgs(null, null, null, null, null, null);
|
||||||
|
ksmMetrics = cluster.getKeySpaceManager().getMetrics();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Shutdown MiniDFSCluster.
|
||||||
|
*/
|
||||||
|
@AfterClass
|
||||||
|
public static void shutdown() {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
  // Create a volume and verify its attributes after creation.
  @Test(timeout = 60000)
  public void testCreateVolume() throws IOException, OzoneException {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);

    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    createVolumeArgs.setUserName(userName);
    createVolumeArgs.setAdminName(adminName);
    storageHandler.createVolume(createVolumeArgs);

    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs);
    Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName));
    Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName));
    Assert.assertEquals(0, ksmMetrics.getNumVolumeCreateFails());
  }

  // Create a volume, change the volume owner, and then verify its attributes.
  @Test(timeout = 60000)
  public void testChangeVolumeOwner() throws IOException, OzoneException {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);

    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    createVolumeArgs.setUserName(userName);
    createVolumeArgs.setAdminName(adminName);
    storageHandler.createVolume(createVolumeArgs);

    String newUserName = "user" + RandomStringUtils.randomNumeric(5);
    createVolumeArgs.setUserName(newUserName);
    storageHandler.setVolumeOwner(createVolumeArgs);

    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);

    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
    Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName));
    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName));
    Assert.assertEquals(0, ksmMetrics.getNumVolumeCreateFails());
    Assert.assertEquals(0, ksmMetrics.getNumVolumeInfoFails());
  }

  // Create a volume, change the volume quota, and then verify its attributes.
  @Test(timeout = 60000)
  public void testChangeVolumeQuota() throws IOException, OzoneException {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    Random rand = new Random();

    // Create a new volume with a quota.
    OzoneQuota createQuota =
        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    createVolumeArgs.setUserName(userName);
    createVolumeArgs.setAdminName(adminName);
    createVolumeArgs.setQuota(createQuota);
    storageHandler.createVolume(createVolumeArgs);

    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
    Assert.assertEquals(retVolumeInfo.getQuota().sizeInBytes(),
        createQuota.sizeInBytes());

    // Set a new quota and verify it.
    OzoneQuota setQuota =
        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
    createVolumeArgs.setQuota(setQuota);
    storageHandler.setVolumeQuota(createVolumeArgs, false);
    getVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
    Assert.assertEquals(retVolumeInfo.getQuota().sizeInBytes(),
        setQuota.sizeInBytes());

    // Remove the quota and verify that it falls back to the default maximum.
    storageHandler.setVolumeQuota(createVolumeArgs, true);
    getVolumeArgs = new VolumeArgs(volumeName, volUserArgs);
    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
    Assert.assertEquals(retVolumeInfo.getQuota().sizeInBytes(),
        OzoneConsts.MAX_QUOTA_IN_BYTES);
    Assert.assertEquals(0, ksmMetrics.getNumVolumeCreateFails());
    Assert.assertEquals(0, ksmMetrics.getNumVolumeInfoFails());
  }
}
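The tests above drive the new set-owner and set-quota paths through the StorageHandler facade. As a quick orientation for reviewers, the following is a minimal sketch (not part of this patch) of that same volume-property flow, reusing only calls already exercised in TestKeySpaceManager; the configuration mirrors init() above, and volume, user, and quota values are placeholders.

// Sketch only: change a volume's owner and quota through the StorageHandler
// facade. Names such as "vol-example", "alice", and "bob" are placeholders.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
    OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
StorageHandler handler = new ObjectStoreHandler(conf).getStorageHandler();

UserArgs userArgs = new UserArgs(null, null, null, null, null, null);
VolumeArgs volumeArgs = new VolumeArgs("vol-example", userArgs);
volumeArgs.setUserName("alice");   // initial owner
volumeArgs.setAdminName("admin");  // creating admin
handler.createVolume(volumeArgs);

volumeArgs.setUserName("bob");     // new owner
handler.setVolumeOwner(volumeArgs);

volumeArgs.setQuota(new OzoneQuota(10, OzoneQuota.Units.GB));
handler.setVolumeQuota(volumeArgs, false);  // false = apply the quota
handler.setVolumeQuota(volumeArgs, true);   // true = remove it again

VolumeInfo info = handler.getVolumeInfo(new VolumeArgs("vol-example", userArgs));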
@ -89,9 +89,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   public void testCreateVolumes() throws IOException {
     super.testCreateVolumes(port);
     Assert.assertEquals(cluster.getKeySpaceManager()
-        .getMetrics().getNumVolumeCreates(), 1);
-    Assert.assertEquals(cluster.getKeySpaceManager()
-        .getMetrics().getNumVolumeCreateFails(), 0);
+        .getMetrics().getNumVolumeCreateFails(), 0);
   }

   /**
@ -99,8 +97,11 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Test
   public void testCreateVolumesWithQuota() throws IOException {
     super.testCreateVolumesWithQuota(port);
+    Assert.assertEquals(cluster.getKeySpaceManager()
+        .getMetrics().getNumVolumeCreateFails(), 0);
   }

   /**
@ -108,8 +109,11 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Test
   public void testCreateVolumesWithInvalidQuota() throws IOException {
     super.testCreateVolumesWithInvalidQuota(port);
+    Assert.assertEquals(cluster.getKeySpaceManager()
+        .getMetrics().getNumVolumeCreateFails(), 0);
   }

   /**
@ -119,8 +123,11 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Test
   public void testCreateVolumesWithInvalidUser() throws IOException {
     super.testCreateVolumesWithInvalidUser(port);
+    Assert.assertEquals(cluster.getKeySpaceManager()
+        .getMetrics().getNumVolumeCreateFails(), 0);
   }

   /**
@ -131,8 +138,11 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Test
   public void testCreateVolumesWithOutAdminRights() throws IOException {
     super.testCreateVolumesWithOutAdminRights(port);
+    Assert.assertEquals(cluster.getKeySpaceManager()
+        .getMetrics().getNumVolumeCreateFails(), 0);
   }

   /**
@ -140,8 +150,11 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Test
   public void testCreateVolumesInLoop() throws IOException {
     super.testCreateVolumesInLoop(port);
+    Assert.assertEquals(cluster.getKeySpaceManager()
+        .getMetrics().getNumVolumeCreateFails(), 0);
   }
   /**
    * Get volumes owned by the user.