diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt index 79439929cf0..8c4db07b973 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-7240.txt @@ -21,3 +21,6 @@ Arpit Agarwal) HDFS-8654. OzoneHandler : Add ACL support. (Anu Engineer via Arpit Agarwal) + + HDFS-8680. OzoneHandler : Add Local StorageHandler support for volumes. + (Anu Engineer via Arpit Agarwal) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java index 8655edc3dc1..939ed1e02eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/LocalStorageHandler.java @@ -19,12 +19,11 @@ package org.apache.hadoop.ozone.web.localstorage; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.StorageContainerConfiguration; import org.apache.hadoop.ozone.web.exceptions.OzoneException; import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; +import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.response.ListVolumes; import org.apache.hadoop.ozone.web.response.VolumeInfo; @@ -38,16 +37,10 @@ import java.io.IOException; */ @InterfaceAudience.Private public class LocalStorageHandler implements StorageHandler { - private String storageRoot = null; - /** * Constructs LocalStorageHandler. */ public LocalStorageHandler() { - StorageContainerConfiguration conf = new StorageContainerConfiguration(); - storageRoot = conf.getTrimmed( - OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, - OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT); } /** @@ -59,6 +52,9 @@ public class LocalStorageHandler implements StorageHandler { */ @Override public void createVolume(VolumeArgs args) throws IOException, OzoneException { + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + oz.createVolume(args); + } /** @@ -71,6 +67,8 @@ public class LocalStorageHandler implements StorageHandler { @Override public void setVolumeOwner(VolumeArgs args) throws IOException, OzoneException { + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.OWNER); } /** @@ -84,6 +82,13 @@ public class LocalStorageHandler implements StorageHandler { @Override public void setVolumeQuota(VolumeArgs args, boolean remove) throws IOException, OzoneException { + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + + if(remove) { + OzoneQuota quota = new OzoneQuota(); + args.setQuota(quota); + } + oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.QUOTA); } @@ -96,12 +101,13 @@ public class LocalStorageHandler implements StorageHandler { * @return - Boolean - True if the user can modify the volume. 
* This is possible for owners of the volume and admin users * - * @throws FileSystemException + * @throws IOException */ @Override public boolean checkVolumeAccess(VolumeArgs args) throws IOException, OzoneException { - return true; + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + return oz.checkVolumeAccess(args); } @@ -117,7 +123,8 @@ public class LocalStorageHandler implements StorageHandler { @Override public VolumeInfo getVolumeInfo(VolumeArgs args) throws IOException, OzoneException { - return null; + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + return oz.getVolumeInfo(args); } @@ -130,6 +137,9 @@ public class LocalStorageHandler implements StorageHandler { */ @Override public void deleteVolume(VolumeArgs args) throws IOException, OzoneException { + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + oz.deleteVolume(args); + } /** @@ -144,7 +154,8 @@ public class LocalStorageHandler implements StorageHandler { @Override public ListVolumes listVolumes(UserArgs args) throws IOException, OzoneException { - return null; + OzoneMetadataManager oz = OzoneMetadataManager.getOzoneMetadataManager(); + return oz.listVolumes(args); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneLevelDBStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneLevelDBStore.java new file mode 100644 index 00000000000..a0bff682a83 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneLevelDBStore.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.web.localstorage; + +import org.fusesource.leveldbjni.JniDBFactory; +import org.iq80.leveldb.DB; +import org.iq80.leveldb.Options; + +import java.io.File; +import java.io.IOException; + +/** + * OzoneLevelDBStore is used by the local + * OzoneStore which is used in testing. + */ +class OzoneLevelDBStore { + private DB db; + + /** + * Opens a DB file. + * + * @param dbPath - DB File path + * @param createIfMissing - Create if missing + * + * @throws IOException + */ + OzoneLevelDBStore(File dbPath, boolean createIfMissing) throws IOException { + Options options = new Options(); + options.createIfMissing(createIfMissing); + db = JniDBFactory.factory.open(dbPath, options); + if (db == null) { + throw new IOException("Db is null"); + } + } + + /** + * Puts a Key into file. + * + * @param key - key + * @param value - value + */ + public void put(byte[] key, byte[] value) { + db.put(key, value); + } + + /** + * Get Key. 
+   *
+   * @param key - key
+   *
+   * @return value
+   */
+  public byte[] get(byte[] key) {
+    return db.get(key);
+  }
+
+  /**
+   * Deletes a key.
+   *
+   * @param key - Key
+   */
+  public void delete(byte[] key) {
+    db.delete(key);
+  }
+
+  /**
+   * Closes the DB.
+   *
+   * @throws IOException
+   */
+  public void close() throws IOException {
+    db.close();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java
new file mode 100644
index 00000000000..22b1ed297ff
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java
@@ -0,0 +1,432 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.localstorage;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.StorageContainerConfiguration;
+import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
+import org.apache.hadoop.ozone.web.exceptions.OzoneException;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.response.ListVolumes;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.response.VolumeOwner;
+import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.iq80.leveldb.DBException;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Locale;
+import java.util.TimeZone;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * A standalone Ozone implementation that allows us to run
+ * Ozone tests in local mode. This acts as the
+ * Ozone backend when using MiniDFSCluster for testing.
+ */
+public final class OzoneMetadataManager {
+  static final Log LOG = LogFactory.getLog(OzoneMetadataManager.class);
+  private static OzoneMetadataManager bm = null;
+
+  /*
+    OzoneMetadataManager manages volume/bucket/object metadata and
+    data.
+
+    Metadata is maintained in two LevelDB files, UserDB and MetadataDB.
+
+    UserDB contains a Name and a List. For example, volumes owned by the user
+    bilbo would be maintained in UserDB as {bilbo}->{shire, rings}.
+
+    The list part of this mapping is context-sensitive. That is, if you use
+    {user name} as the key, the list you get is a list of volumes. If you use
+    {user/volume} as the key, the list you get is a list of buckets.
+    If you use {user/volume/bucket} as the key, you get the list of objects.
+
+    All keys in the UserDB start with the UserName.
+
+    We also need to maintain a flat namespace for volumes. This is
+    maintained by the MetadataDB. MetadataDB contains the name of an
+    object (volume, bucket or key) and its associated metadata.
+    The keys in the MetadataDB are {volume}, {volume/bucket} or
+    {volume/bucket/key}. The user name is absent, so we have a common root
+    namespace for the volumes.
+
+    The value part of metadataDB points to the corresponding *Info structures.
+    {volume} -> volumeInfo
+    {volume/bucket} -> bucketInfo
+    {volume/bucket/key} -> keyInfo
+
+
+    Here are the various workflows:
+
+    CreateVolume -> Check if the volume exists in metadataDB; if not, update
+    UserDB with the list of volumes and update metadataDB with the VolumeInfo.
+
+    DeleteVolume -> Check the volume, and check VolumeInfo->bucketCount.
+    If bucketCount == 0, delete the volume from userDB->{list of volumes} and
+    from metadataDB.
+
+    Very similar workflows exist for CreateBucket and DeleteBucket.
+
+    // Please note : These database operations are *not* transactional,
+    // which means that failure can lead to inconsistencies.
+    // The only way to recover is to reset to a clean state, or
+    // use rm -rf /tmp/ozone :)
+
+    We have a very simple locking policy: a single reader-writer lock that is
+    taken for each action; this lock is aptly named "lock".
+
+    All actions *must* be performed with a lock held, either a read
+    lock or a write lock. Violating this locking policy can be harmful.
+
+
+    // // IMPORTANT :
+    // // This is a simulation layer, this is NOT how the real
+    // // OZONE functions. This is written so that we can write
+    // // stand-alone tests for the protocol and client code.
+
+*/
+
+  private OzoneLevelDBStore userDB;
+  private OzoneLevelDBStore metadataDB;
+
+  private static final String USER_DB = "/user.db";
+  private static final String META_DB = "/metadata.db";
+
+  private ReadWriteLock lock;
+  private Charset encoding = Charset.forName("UTF-8");
+
+  /**
+   * Constructs OzoneMetadataManager.
+   */
+  private OzoneMetadataManager() {
+
+    lock = new ReentrantReadWriteLock();
+    StorageContainerConfiguration conf = new StorageContainerConfiguration();
+
+    String storageRoot =
+        conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+            OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+
+    File file = new File(storageRoot);
+
+    if (!file.exists() && !file.mkdirs()) {
+      LOG.fatal("Creation of Ozone root failed. " + file.toString());
+    }
+
+    try {
+      userDB = new OzoneLevelDBStore(new File(storageRoot + USER_DB), true);
+      metadataDB = new OzoneLevelDBStore(new File(storageRoot + META_DB), true);
+    } catch (IOException ex) {
+      LOG.fatal("Cannot open db: " + ex.getMessage());
+    }
+  }
+
+  /**
+   * Gets the Ozone metadata manager.
+   * @return OzoneMetadataManager
+   */
+  public static synchronized OzoneMetadataManager getOzoneMetadataManager() {
+    if (bm == null) {
+      bm = new OzoneMetadataManager();
+    }
+    return bm;
+  }
+
+  /**
+   * Creates a volume.
+ * + * @param args - VolumeArgs + * + * @throws OzoneException + */ + public void createVolume(VolumeArgs args) throws OzoneException { + try { + SimpleDateFormat format = + new SimpleDateFormat(OzoneConsts.OZONE_DATE_FORMAT, Locale.US); + format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); + + lock.writeLock().lock(); + byte[] volumeName = + metadataDB.get(args.getVolumeName().getBytes(encoding)); + + if (volumeName != null) { + LOG.debug("Volume already exists."); + throw ErrorTable.newError(ErrorTable.VOLUME_ALREADY_EXISTS, args); + } + + VolumeInfo newVInfo = new VolumeInfo(args.getVolumeName(), format + .format(new Date(System.currentTimeMillis())), args.getAdminName()); + + newVInfo.setQuota(args.getQuota()); + VolumeOwner owner = new VolumeOwner(args.getUserName()); + newVInfo.setOwner(owner); + + ListVolumes volumeList; + byte[] userVolumes = userDB.get(args.getUserName().getBytes(encoding)); + if (userVolumes == null) { + volumeList = new ListVolumes(); + } else { + volumeList = ListVolumes.parse(new String(userVolumes, encoding)); + } + + volumeList.addVolume(newVInfo); + volumeList.sort(); + + // Please note : These database operations are *not* transactional, + // which means that failure can lead to inconsistencies. + // Only way to recover is to reset to a clean state, or + // use rm -rf /tmp/ozone :) + + + userDB.put(args.getUserName().getBytes(encoding), + volumeList.toDBString().getBytes(encoding)); + + metadataDB.put(args.getVolumeName().getBytes(encoding), + newVInfo.toDBString().getBytes(encoding)); + + } catch (IOException | DBException ex) { + throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Updates the Volume properties like Owner Name and Quota. + * + * @param args - Volume Args + * @param property - Flag which tells us what property to upgrade + * + * @throws OzoneException + */ + public void setVolumeProperty(VolumeArgs args, VolumeProperty property) + throws OzoneException { + VolumeInfo info; + try { + lock.writeLock().lock(); + byte[] volumeInfo = + metadataDB.get(args.getVolumeName().getBytes(encoding)); + if (volumeInfo == null) { + throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); + } + info = VolumeInfo.parse(new String(volumeInfo, encoding)); + + byte[] userBytes = userDB.get(args.getResourceName().getBytes(encoding)); + ListVolumes volumeList; + if (userBytes == null) { + volumeList = new ListVolumes(); + } else { + volumeList = ListVolumes.parse(new String(userBytes, encoding)); + } + + switch (property) { + case OWNER: + // needs new owner, we delete the volume object from the + // old user's volume list + removeOldOwner(info); + VolumeOwner owner = new VolumeOwner(args.getUserName()); + // set the new owner + info.setOwner(owner); + break; + case QUOTA: + // if this is quota update we just remove the old object from the + // current users list and update the same object later. + volumeList.getVolumes().remove(info); + info.setQuota(args.getQuota()); + break; + default: + OzoneException ozEx = + ErrorTable.newError(ErrorTable.SERVER_ERROR, args); + ozEx.setMessage("Volume property is not recognized"); + throw ozEx; + } + + volumeList.addVolume(info); + + metadataDB.put(args.getVolumeName().getBytes(encoding), + info.toDBString().getBytes(encoding)); + + // if this is an owner change this put will create a new owner or update + // the owner's volume list. 
+ userDB.put(args.getResourceName().getBytes(encoding), + volumeList.toDBString().getBytes(encoding)); + + } catch (IOException | DBException ex) { + throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Removes the old owner from the volume. + * + * @param info - VolumeInfo + * + * @throws IOException + */ + private void removeOldOwner(VolumeInfo info) throws IOException { + // We need to look the owner that we know is the current owner + byte[] volumeBytes = + userDB.get(info.getOwner().getName().getBytes(encoding)); + ListVolumes volumeList = + ListVolumes.parse(new String(volumeBytes, encoding)); + volumeList.getVolumes().remove(info); + + // Write the new list info to the old user data + userDB.put(info.getOwner().getName().getBytes(encoding), + volumeList.toDBString().getBytes(encoding)); + } + + /** + * Checks if you are the owner of a specific volume. + * + * @param args - VolumeArgs + * + * @return - True if you are the owner, false otherwise + * + * @throws OzoneException + */ + public boolean checkVolumeAccess(VolumeArgs args) throws OzoneException { + VolumeInfo info; + try { + lock.readLock().lock(); + byte[] volumeInfo = + metadataDB.get(args.getVolumeName().getBytes(encoding)); + if (volumeInfo == null) { + throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); + } + info = VolumeInfo.parse(new String(volumeInfo, encoding)); + return info.getOwner().getName().equals(args.getUserName()); + } catch (IOException | DBException ex) { + throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); + } finally { + lock.readLock().unlock(); + } + } + + /** + * getVolumeInfo returns the Volume Info of a specific volume. + * + * @param args - Volume args + * + * @return VolumeInfo + * + * @throws OzoneException + */ + public VolumeInfo getVolumeInfo(VolumeArgs args) throws OzoneException { + try { + lock.readLock().lock(); + byte[] volumeInfo = + metadataDB.get(args.getVolumeName().getBytes(encoding)); + if (volumeInfo == null) { + throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); + } + + return VolumeInfo.parse(new String(volumeInfo, encoding)); + } catch (IOException | DBException ex) { + throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); + } finally { + lock.readLock().unlock(); + } + } + + /** + * Returns all the volumes owned by a specific user. + * + * @param args - User Args + * + * @return - ListVolumes + * + * @throws OzoneException + */ + public ListVolumes listVolumes(UserArgs args) throws OzoneException { + try { + lock.readLock().lock(); + byte[] volumeList = userDB.get(args.getUserName().getBytes(encoding)); + if (volumeList == null) { + throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args); + } + return ListVolumes.parse(new String(volumeList, encoding)); + } catch (IOException | DBException ex) { + throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); + } finally { + lock.readLock().unlock(); + } + } + + /** + * Deletes a volume if it exists and is empty. + * + * @param args - volume args + * + * @throws OzoneException + */ + public void deleteVolume(VolumeArgs args) throws OzoneException { + try { + lock.writeLock().lock(); + byte[] volumeName = + metadataDB.get(args.getVolumeName().getBytes(encoding)); + if (volumeName == null) { + throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); + } + + VolumeInfo vInfo = VolumeInfo.parse(new String(volumeName, encoding)); + + // Only remove volumes if they are empty. 
+ if (vInfo.getBucketCount() > 0) { + throw ErrorTable.newError(ErrorTable.VOLUME_NOT_EMPTY, args); + } + + ListVolumes volumeList; + String user = vInfo.getOwner().getName(); + byte[] userVolumes = userDB.get(user.getBytes(encoding)); + if (userVolumes == null) { + throw ErrorTable.newError(ErrorTable.VOLUME_NOT_FOUND, args); + } + + volumeList = ListVolumes.parse(new String(userVolumes, encoding)); + volumeList.getVolumes().remove(vInfo); + + metadataDB.delete(args.getVolumeName().getBytes(encoding)); + userDB.put(user.getBytes(encoding), + volumeList.toDBString().getBytes(encoding)); + } catch (IOException | DBException ex) { + throw ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex); + } finally { + lock.writeLock().unlock(); + } + } + + + /** + * This is used in updates to volume metadata. + */ + public enum VolumeProperty { + OWNER, QUOTA + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java index bdd7b6d8a89..35f2995c7ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java @@ -43,6 +43,10 @@ public final class OzoneConsts { public static final String OZONE_ACL_READ_WRITE = "rw"; public static final String OZONE_ACL_WRITE_READ = "wr"; + public static final String OZONE_DATE_FORMAT = + "EEE, dd MMM yyyy HH:mm:ss zzz"; + public static final String OZONE_TIME_ZONE = "GMT"; + private OzoneConsts() { // Never Constructed } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index c454cc4553e..c6130c1e6ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.web.exceptions.ErrorTable; import org.apache.hadoop.ozone.web.exceptions.OzoneException; import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.headers.Header; -import org.apache.hadoop.util.Time; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Request; @@ -45,6 +44,10 @@ import java.util.UUID; @InterfaceAudience.Private public final class OzoneUtils { + private OzoneUtils() { + // Never constructed + } + /** * verifies that bucket name / volume name is a valid DNS name. * @@ -128,7 +131,7 @@ public final class OzoneUtils { /** * Returns a random Request ID. * - * Request ID is returned to the client as well as flows thru the system + * Request ID is returned to the client as well as flows through the system * facilitating debugging on why a certain request failed. 
* * @return String random request ID @@ -206,8 +209,8 @@ public final class OzoneUtils { String resource, String hostname) throws OzoneException { SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US); - format.setTimeZone(TimeZone.getTimeZone("GMT")); + new SimpleDateFormat(OzoneConsts.OZONE_DATE_FORMAT, Locale.US); + format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); try { return format.parse(dateString); @@ -231,9 +234,9 @@ public final class OzoneUtils { public static Response getResponse(UserArgs args, int statusCode, String payload) { SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US); - format.setTimeZone(TimeZone.getTimeZone("GMT")); - String date = format.format(new Date(Time.monotonicNow())); + new SimpleDateFormat(OzoneConsts.OZONE_DATE_FORMAT, Locale.US); + format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); + String date = format.format(new Date(System.currentTimeMillis())); return Response.ok(payload) .header(Header.OZONE_SERVER_NAME, args.getHostName()) .header(Header.OZONE_REQUEST_ID, args.getRequestID()) @@ -252,17 +255,13 @@ public final class OzoneUtils { public static Response getResponse(UserArgs args, int statusCode, InputStream stream) { SimpleDateFormat format = - new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US); - format.setTimeZone(TimeZone.getTimeZone("GMT")); - String date = format.format(new Date(Time.monotonicNow())); + new SimpleDateFormat(OzoneConsts.OZONE_DATE_FORMAT, Locale.US); + format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); + String date = format.format(new Date(System.currentTimeMillis())); return Response.ok(stream) .header(Header.OZONE_SERVER_NAME, args.getHostName()) .header(Header.OZONE_REQUEST_ID, args.getRequestID()) .header(HttpHeaders.DATE, date).status(statusCode) .header(HttpHeaders.CONTENT_TYPE, "application/octet-stream").build(); } - - private OzoneUtils() { - // Never constructed - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java new file mode 100644 index 00000000000..551e7d48095 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+package org.apache.hadoop.ozone.web;
+
+import org.apache.hadoop.ozone.web.response.ListVolumes;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.response.VolumeOwner;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestVolumeStructs {
+
+  @Test
+  public void testVolumeInfoParse() throws IOException {
+    VolumeInfo volInfo = new VolumeInfo("testvol",
+        "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
+    VolumeOwner owner = new VolumeOwner("bilbo");
+    volInfo.setOwner(owner);
+    String jString = volInfo.toJsonString();
+    VolumeInfo newVolInfo = VolumeInfo.parse(jString);
+    String one = volInfo.toJsonString();
+    String two = newVolInfo.toJsonString();
+
+    assertEquals(one, two);
+  }
+
+  @Test
+  public void testVolumeInfoValue() throws IOException {
+    String createdOn = "Thu, Apr 9, 2015 10:23:45 GMT";
+    String createdBy = "gandalf";
+    VolumeInfo volInfo = new VolumeInfo("testvol",
+        createdOn, createdBy);
+    assertEquals(volInfo.getCreatedBy(), createdBy);
+    assertEquals(volInfo.getCreatedOn(), createdOn);
+  }
+
+
+  @Test
+  public void testVolumeListParse() throws IOException {
+
+    ListVolumes list = new ListVolumes();
+    for (int x = 0; x < 100; x++) {
+      VolumeInfo volInfo = new VolumeInfo("testvol" + Integer.toString(x),
+          "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
+      list.addVolume(volInfo);
+    }
+    list.sort();
+    String listString = list.toJsonString();
+    ListVolumes newList = ListVolumes.parse(listString);
+    assertEquals(list.toJsonString(), newList.toJsonString());
+  }
+
+}
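
The metadata layout described in the OzoneMetadataManager header comment can be
summarized with a short illustrative sketch (not part of the patch itself). The
volume name, owner, admin and date below are made up; the sketch only uses the
VolumeInfo, VolumeOwner and ListVolumes calls already exercised above, and
assumes toDBString() produces the value that the two LevelDB stores keep.

    // Illustrative sketch of what createVolume for volume "shire", issued by
    // admin "hdfs" on behalf of owner "bilbo", ends up writing.
    VolumeInfo info = new VolumeInfo("shire",
        "Thu, 09 Apr 2015 10:23:45 GMT", "hdfs"); // name, created-on, admin
    info.setOwner(new VolumeOwner("bilbo"));

    ListVolumes volumes = new ListVolumes();
    volumes.addVolume(info);
    volumes.sort();

    // userDB     : "bilbo" -> volumes.toDBString()  (per-user volume list)
    // metadataDB : "shire" -> info.toDBString()     (flat volume namespace)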
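The new OzoneConsts.OZONE_DATE_FORMAT and OZONE_TIME_ZONE constants give
OzoneUtils and the local metadata manager a single definition of the HTTP-style
date they exchange. A minimal sketch of the round trip they imply (the sample
value is illustrative; parse() throws ParseException on malformed input):

    // Same pattern the patch uses in OzoneUtils#getResponse.
    SimpleDateFormat format =
        new SimpleDateFormat(OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
    format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));

    String date = format.format(new Date(System.currentTimeMillis()));
    // e.g. "Thu, 09 Apr 2015 10:23:45 GMT"
    Date roundTrip = format.parse(date);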