HDFS-9916. OzoneHandler : Add Key handler. Contributed by Anu Engineer.

Chris Nauroth 2016-03-09 10:17:05 -08:00 committed by Owen O'Malley
parent 2b387d622c
commit 93201330b8
7 changed files with 245 additions and 53 deletions

View File

@@ -202,9 +202,12 @@ public final class ErrorTable {
Exception ex) {
OzoneException err =
new OzoneException(e.getHttpCode(), e.getShortMessage(), ex);
err.setRequestId(args.getRequestID());
err.setResource(args.getResourceName());
err.setHostID(args.getHostName());
if(args != null) {
err.setRequestId(args.getRequestID());
err.setResource(args.getResourceName());
err.setHostID(args.getHostName());
}
err.setMessage(ex.getMessage());
return err;
}
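For context, a minimal illustration (not part of the patch) of why the null guard above matters: some error paths run before any request arguments have been parsed, so callers may pass null. ErrorTable.INVALID_QUERY_PARAM and the three-argument newError overload are taken from this diff; the surrounding variables are hypothetical.

// Hypothetical call site: the request failed before UserArgs/BucketArgs were
// built, so there is no args object to attach the request ID, resource or host.
Exception cause = new IOException("header parsing failed");   // hypothetical
OzoneException err =
    ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, null, cause);
throw err;   // with the guard above, this no longer risks a NullPointerException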

View File

@@ -174,18 +174,16 @@ public class BucketHandler implements Bucket {
public Response doProcess(BucketArgs args)
throws OzoneException, IOException {
switch (info) {
//TODO : Enable when Object support is enabled.
// case Header.OZONE_LIST_QUERY_KEY:
// ListArgs listArgs = new ListArgs(args, prefix,
// maxKeys, startPage);
// return getBucketKeysList(listArgs);
case Header.OZONE_LIST_QUERY_BUCKET:
return getBucketInfoResponse(args);
default:
OzoneException ozException =
ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
ozException.setMessage("Unrecognized query param : " + info);
throw ozException;
case Header.OZONE_LIST_QUERY_KEY:
ListArgs listArgs = new ListArgs(args, prefix, maxKeys, startPage);
return getBucketKeysList(listArgs);
case Header.OZONE_LIST_QUERY_BUCKET:
return getBucketInfoResponse(args);
default:
OzoneException ozException =
ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
ozException.setMessage("Unrecognized query param : " + info);
throw ozException;
}
}
}.handleCall(volume, bucket, req, uriInfo, headers);

View File

@@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListKeys;
import org.apache.hadoop.ozone.web.utils.OzoneConsts;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -179,7 +180,6 @@ public abstract class BucketProcessTemplate {
*
* @return List of ACLs
*
* @throws OzoneException
*/
List<String> getAcls(BucketArgs args, String tag) {
List<String> aclStrings =
@@ -275,4 +275,17 @@ public abstract class BucketProcessTemplate {
return OzoneUtils.getResponse(args, HTTP_OK, info.toJsonString());
}
/**
* Returns list of objects in a bucket.
* @param args - ListArgs
* @return Response
* @throws IOException
* @throws OzoneException
*/
Response getBucketKeysList(ListArgs args) throws IOException, OzoneException {
StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
ListKeys objects = fs.listKeys(args);
return OzoneUtils.getResponse(args, HTTP_OK, objects.toJsonString());
}
}
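As a quick sketch (not part of the patch) of how this helper is reached, mirroring the BucketHandler change earlier in this commit; prefix, maxKeys and startPage stand in for the bucket-listing query parameters carried by the request.

// Inside a bucket handler's doProcess(): build ListArgs from the listing
// parameters and delegate; the result is a 200 response whose body is the
// ListKeys JSON produced by getBucketKeysList above.
ListArgs listArgs = new ListArgs(args, prefix, maxKeys, startPage);
Response keysResponse = getBucketKeysList(listArgs);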

View File

@@ -56,6 +56,7 @@ public final class Header {
public static final String OZONE_LIST_QUERY_TAG ="info";
public static final String OZONE_QUOTA_QUERY_TAG ="quota";
public static final String CONTENT_MD5 = "Content-MD5";

View File

@@ -19,16 +19,21 @@
package org.apache.hadoop.ozone.web.interfaces;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListBuckets;
import org.apache.hadoop.ozone.web.response.ListKeys;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import java.io.IOException;
import java.io.OutputStream;
/**
* Storage handler Interface is the Interface between
@@ -205,5 +210,64 @@ public interface StorageHandler {
*/
BucketInfo getBucketInfo(BucketArgs args) throws IOException, OzoneException;
/**
* Writes a key in an existing bucket.
*
* @param args KeyArgs
*
* @return OutputStream
*
* @throws OzoneException
*/
OutputStream newKeyWriter(KeyArgs args)
throws IOException, OzoneException;
/**
* Tells the file system that the object has been written out
* completely and it can do any housekeeping operation that needs
* to be done.
*
* @param args Key Args
*
* @param stream - stream previously returned by newKeyWriter for this key
* @throws IOException
*/
void commitKey(KeyArgs args, OutputStream stream)
throws IOException, OzoneException;
/**
* Reads a key from an existing bucket.
*
* @param args KeyArgs
*
* @return LengthInputStream
*
* @throws IOException
*/
LengthInputStream newKeyReader(KeyArgs args)
throws IOException, OzoneException;
/**
* Deletes an existing key.
*
* @param args KeyArgs
*
* @throws OzoneException
*/
void deleteKey(KeyArgs args) throws IOException, OzoneException;
/**
* Returns a list of keys in the bucket.
*
* @param args ListArgs
*
* @return ListKeys
*
* @throws IOException
*/
ListKeys listKeys(ListArgs args) throws IOException, OzoneException;
}
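As a rough usage sketch of the new key operations (illustrative only: how KeyArgs and ListArgs are constructed is not defined by this patch, so they are taken as parameters here; StorageHandlerBuilder appears elsewhere in this diff, and LengthInputStream's getLength() accessor is an assumption).

// Hedged sketch of the write/commit/read/list/delete flow against a StorageHandler.
static void keyRoundTrip(KeyArgs keyArgs, ListArgs listArgs, byte[] data)
    throws IOException, OzoneException {
  StorageHandler fs = StorageHandlerBuilder.getStorageHandler();

  OutputStream out = fs.newKeyWriter(keyArgs);     // open a writer for the key
  out.write(data);
  fs.commitKey(keyArgs, out);                      // tell the backend the object is complete

  LengthInputStream in = fs.newKeyReader(keyArgs);
  long length = in.getLength();                    // key size reported by the stream
  in.close();

  ListKeys keys = fs.listKeys(listArgs);           // enumerate keys in the bucket
  fs.deleteKey(keyArgs);                           // remove the key again
}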

View File

@@ -20,28 +20,33 @@ package org.apache.hadoop.ozone.web.localstorage;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListBuckets;
import org.apache.hadoop.ozone.web.response.ListKeys;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import java.io.IOException;
import java.io.OutputStream;
/**
* PLEASE NOTE : This file is a dummy backend for test purposes
* and prototyping effort only. It does not handle any Object semantics
* correctly, neither does it take care of security.
* PLEASE NOTE : This file is a dummy backend for test purposes and prototyping
* effort only. It does not handle any Object semantics correctly, neither does
* it take care of security.
*/
@InterfaceAudience.Private
public class LocalStorageHandler implements StorageHandler {
private final Configuration conf;
/**
* Constructs LocalStorageHandler.
*/
@@ -53,7 +58,6 @@ public class LocalStorageHandler implements StorageHandler {
* Creates Storage Volume.
*
* @param args - volumeArgs
*
* @throws IOException
*/
@Override
@@ -68,7 +72,6 @@ public class LocalStorageHandler implements StorageHandler {
* setVolumeOwner - sets the owner of the volume.
*
* @param args volumeArgs
*
* @throws IOException
*/
@Override
@@ -82,9 +85,8 @@ public class LocalStorageHandler implements StorageHandler {
/**
* Set Volume Quota Info.
*
* @param args - volumeArgs
* @param args - volumeArgs
* @param remove - true if the request is to remove the quota
*
* @throws IOException
*/
@Override
@@ -93,23 +95,19 @@ public class LocalStorageHandler implements StorageHandler {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
if(remove) {
if (remove) {
OzoneQuota quota = new OzoneQuota();
args.setQuota(quota);
}
oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.QUOTA);
}
/**
* Checks if a Volume exists and the user specified has access to the
* volume.
* Checks if a Volume exists and the user specified has access to the volume.
*
* @param args - volumeArgs
*
* @return - Boolean - True if the user can modify the volume.
* This is possible for owners of the volume and admin users
*
* @return - Boolean - True if the user can modify the volume. This is
* possible for owners of the volume and admin users
* @throws IOException
*/
@Override
@@ -120,14 +118,11 @@ public class LocalStorageHandler implements StorageHandler {
return oz.checkVolumeAccess(args);
}
/**
* Returns Info about the specified Volume.
*
* @param args - volumeArgs
*
* @return VolumeInfo
*
* @throws IOException
*/
@Override
@@ -138,12 +133,10 @@ public class LocalStorageHandler implements StorageHandler {
return oz.getVolumeInfo(args);
}
/**
* Deletes an Empty Volume.
*
* @param args - Volume Args
*
* @throws IOException
*/
@Override
@@ -158,9 +151,7 @@ public class LocalStorageHandler implements StorageHandler {
* Returns the List of Volumes owned by the specific user.
*
* @param args - UserArgs
*
* @return - List of Volumes
*
* @throws IOException
*/
@Override
@@ -172,11 +163,10 @@ public class LocalStorageHandler implements StorageHandler {
}
/**
* true if the bucket exists and user has read access
* to the bucket else throws Exception.
* true if the bucket exists and user has read access to the bucket else
* throws Exception.
*
* @param args Bucket args structure
*
* @throws IOException
*/
@Override
@@ -189,7 +179,6 @@ public class LocalStorageHandler implements StorageHandler {
* Creates a Bucket in specified Volume.
*
* @param args BucketArgs- BucketName, UserName and Acls
*
* @throws IOException
*/
@Override
@@ -203,7 +192,6 @@ public class LocalStorageHandler implements StorageHandler {
* Adds or Removes ACLs from a Bucket.
*
* @param args - BucketArgs
*
* @throws IOException
*/
@Override
@@ -218,7 +206,6 @@ public class LocalStorageHandler implements StorageHandler {
* Enables or disables Bucket Versioning.
*
* @param args - BucketArgs
*
* @throws IOException
*/
@Override
@@ -234,7 +221,6 @@ public class LocalStorageHandler implements StorageHandler {
* Sets the Storage Class of a Bucket.
*
* @param args - BucketArgs
*
* @throws IOException
*/
@Override
@@ -250,7 +236,6 @@ public class LocalStorageHandler implements StorageHandler {
* Deletes a bucket if it is empty.
*
* @param args Bucket args structure
*
* @throws IOException
*/
@Override
@@ -264,9 +249,7 @@ public class LocalStorageHandler implements StorageHandler {
* Returns all Buckets of a specified Volume.
*
* @param args --User Args
*
* @return ListAllBuckets
*
* @throws OzoneException
*/
@Override
@@ -281,9 +264,7 @@ public class LocalStorageHandler implements StorageHandler {
* Returns Bucket's Metadata as a String.
*
* @param args Bucket args structure
*
* @return Info about the bucket
*
* @throws IOException
*/
@Override
@@ -293,4 +274,68 @@ public class LocalStorageHandler implements StorageHandler {
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.getBucketInfo(args);
}
/**
* Writes a key in an existing bucket.
*
* @param args KeyArgs
* @return OutputStream
* @throws OzoneException
*/
@Override
public OutputStream newKeyWriter(KeyArgs args) throws IOException,
OzoneException {
return null;
}
/**
* Tells the file system that the object has been written out completely and
* it can do any housekeeping operation that needs to be done.
*
* @param args Key Args
* @param stream - stream previously returned by newKeyWriter for this key
* @throws IOException
*/
@Override
public void commitKey(KeyArgs args, OutputStream stream) throws
IOException, OzoneException {
}
/**
* Reads a key from an existing bucket.
*
* @param args KeyArgs
* @return LengthInputStream
* @throws IOException
*/
@Override
public LengthInputStream newKeyReader(KeyArgs args) throws IOException,
OzoneException {
return null;
}
/**
* Deletes an existing key.
*
* @param args KeyArgs
* @throws OzoneException
*/
@Override
public void deleteKey(KeyArgs args) throws IOException, OzoneException {
}
/**
* Returns a list of keys in the bucket.
*
* @param args ListArgs
* @return ListKeys
* @throws IOException
*/
@Override
public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
return null;
}
}

View File

@@ -18,18 +18,23 @@
package org.apache.hadoop.ozone.web.storage;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListBuckets;
import org.apache.hadoop.ozone.web.response.ListKeys;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import java.io.IOException;
import java.io.OutputStream;
/**
* A {@link StorageHandler} implementation that distributes object storage
* across the nodes of an HDFS cluster.
@@ -125,4 +130,67 @@ public final class DistributedStorageHandler implements StorageHandler {
throws IOException, OzoneException {
return null;
}
/**
* Writes a key in an existing bucket.
*
* @param args KeyArgs
* @return OutputStream
* @throws OzoneException
*/
@Override
public OutputStream newKeyWriter(KeyArgs args) throws IOException,
OzoneException {
return null;
}
/**
* Tells the file system that the object has been written out completely and
* it can do any housekeeping operation that needs to be done.
*
* @param args Key Args
* @param stream - stream previously returned by newKeyWriter for this key
* @throws IOException
*/
@Override
public void commitKey(KeyArgs args, OutputStream stream) throws
IOException, OzoneException {
}
/**
* Reads a key from an existing bucket.
*
* @param args KeyArgs
* @return LengthInputStream
* @throws IOException
*/
@Override
public LengthInputStream newKeyReader(KeyArgs args) throws IOException,
OzoneException {
return null;
}
/**
* Deletes an existing key.
*
* @param args KeyArgs
* @throws OzoneException
*/
@Override
public void deleteKey(KeyArgs args) throws IOException, OzoneException {
}
/**
* Returns a list of keys in the bucket.
*
* @param args ListArgs
* @return ListKeys
* @throws IOException
*/
@Override
public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
return null;
}
}