From 643c5e5bdc2c7c33b796a006e9643fc196c5d573 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Sat, 26 Mar 2016 11:40:00 -0700 Subject: [PATCH] HDFS-10195. Ozone: Add container persistence. Contributed by Anu Engineer. --- .../hadoop/hdfs/server/datanode/DataNode.java | 28 + .../apache/hadoop/ozone/OzoneConfigKeys.java | 3 + .../ozone/{web/utils => }/OzoneConsts.java | 16 +- .../common/helpers/ContainerData.java | 47 +- .../common/helpers/ContainerUtils.java | 144 +++++ .../impl/ContainerLocationManagerImpl.java | 113 ++++ .../common/impl/ContainerManagerImpl.java | 514 ++++++++++++++++++ .../interfaces/ContainerLocationManager.java | 44 ++ .../common/interfaces/ContainerManager.java | 33 +- .../transport/client/XceiverClient.java | 6 +- .../transport/server/XceiverServer.java | 4 +- .../common/utils/LevelDBStore.java} | 52 +- .../container/ozoneimpl/OzoneContainer.java | 121 +++++ .../container/ozoneimpl/package-info.java | 21 + .../hadoop/ozone/web/client/OzoneBucket.java | 2 +- .../hadoop/ozone/web/client/OzoneVolume.java | 2 +- .../hadoop/ozone/web/handlers/BucketArgs.java | 2 +- .../ozone/web/handlers/BucketHandler.java | 4 +- .../web/handlers/BucketProcessTemplate.java | 11 +- .../web/handlers/KeyProcessTemplate.java | 9 +- .../ozone/web/handlers/VolumeHandler.java | 2 +- .../web/handlers/VolumeProcessTemplate.java | 8 +- .../localstorage/OzoneMetadataManager.java | 11 +- .../hadoop/ozone/web/request/OzoneAcl.java | 2 +- .../hadoop/ozone/web/response/BucketInfo.java | 2 +- .../hadoop/ozone/web/userauth/Simple.java | 2 +- .../hadoop/ozone/web/utils/OzoneUtils.java | 1 + .../proto/DatanodeContainerProtocol.proto | 8 +- .../common/impl/TestContainerPersistence.java | 256 +++++++++ .../ozoneimpl/TestOzoneContainer.java | 118 ++++ .../hadoop/ozone/web/TestBucketInfo.java | 2 +- .../hadoop/ozone/web/TestOzoneVolumes.java | 2 +- .../hadoop/ozone/web/TestOzoneWebAccess.java | 3 +- .../hadoop/ozone/web/client/TestVolume.java | 3 +- 34 files changed, 1534 insertions(+), 62 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/{web/utils => }/OzoneConsts.java (78%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/{web/localstorage/OzoneLevelDBStore.java => container/common/utils/LevelDBStore.java} (67%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index cbbb9a601b2..a5d5015cba7 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -188,6 +188,8 @@ import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.unix.DomainSocket; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SecurityUtil; @@ -365,6 +367,7 @@ public class DataNode extends ReconfigurableBase private final String confVersion; private final long maxNumberOfBlocksToLog; private final boolean pipelineSupportECN; + private final boolean ozoneEnabled; private final List usersWithLocalPathAccess; private final boolean connectToDnViaHostname; @@ -387,6 +390,7 @@ public class DataNode extends ReconfigurableBase private static final int NUM_CORES = Runtime.getRuntime() .availableProcessors(); private static final double CONGESTION_RATIO = 1.5; + private OzoneContainer ozoneServer; private static Tracer createTracer(Configuration conf) { return new Tracer.Builder("DataNode"). @@ -417,6 +421,7 @@ public class DataNode extends ReconfigurableBase this.connectToDnViaHostname = false; this.blockScanner = new BlockScanner(this, conf); this.pipelineSupportECN = false; + this.ozoneEnabled = false; this.checkDiskErrorInterval = ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25)); initOOBTimeout(); @@ -451,6 +456,9 @@ public class DataNode extends ReconfigurableBase this.pipelineSupportECN = conf.getBoolean( DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT); + this.ozoneEnabled = conf.getBoolean(OzoneConfigKeys + .DFS_OBJECTSTORE_ENABLED_KEY, OzoneConfigKeys + .DFS_OBJECTSTORE_ENABLED_DEFAULT); confVersion = "core-" + conf.get("hadoop.common.configuration.version", "UNSPECIFIED") + @@ -1540,6 +1548,15 @@ data.addBlockPool(nsInfo.getBlockPoolID(), conf); blockScanner.enableBlockPoolId(bpos.getBlockPoolId()); initDirectoryScanner(conf); + if(this.ozoneEnabled) { + try { + ozoneServer = new OzoneContainer(conf, this.getFSDataset()); + ozoneServer.start(); + LOG.info("Ozone container server started."); + } catch (Exception ex) { + LOG.error("Unable to start Ozone. ex: {}", ex.toString()); + } + } } List getAllBpOs() { @@ -1828,6 +1845,17 @@ */ public void shutdown() { stopMetricsLogger(); + + if(this.ozoneEnabled) { + if(ozoneServer != null) { + try { + ozoneServer.stop(); + } catch (Exception e) { + LOG.error("Error in ozone shutdown. 
ex {}", e.toString()); + } + } + } + if (plugins != null) { for (ServicePlugin p : plugins) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index ba24866e606..642b473095e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -41,6 +41,9 @@ public final class OzoneConfigKeys { "dfs.objectstore.trace.enabled"; public static final boolean DFS_OBJECTSTORE_TRACE_ENABLED_DEFAULT = false; + public static final String DFS_OZONE_METADATA_DIRS = + "dfs.ozone.metadata.dirs"; + /** * There is no need to instantiate this class. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java similarity index 78% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 80f02d6fbdc..9e528535abb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.web.utils; +package org.apache.hadoop.ozone; import org.apache.hadoop.classification.InterfaceAudience; @@ -53,6 +53,20 @@ public final class OzoneConsts { public static final String OZONE_USER = "user"; public static final String OZONE_REQUEST = "request"; + public static final String CONTAINER_EXTENSION = ".container"; + public static final String CONTAINER_META = ".meta"; + + // container storage is in the following format. + // Data Volume basePath/containers//metadata and + // Data Volume basePath/containers//data/... + public static final String CONTAINER_PREFIX = "containers"; + public static final String CONTAINER_META_PATH = "metadata"; + public static final String CONTAINER_DATA_PATH = "data"; + public static final String CONTAINER_ROOT_PREFIX = "repository"; + + public static final String CONTAINER_DB = "container.db"; + + /** * Supports Bucket Versioning. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java index 8f5120a0e48..9e3e2a3d770 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java @@ -35,8 +35,9 @@ public class ContainerData { private final String containerName; private final Map metadata; - - private String path; + private String dbPath; // Path to Level DB Store. + // Path to Physical file system where container and checksum are stored. + private String containerFilePath; /** * Constructs a ContainerData Object. 
@@ -63,8 +64,13 @@ public class ContainerData { } if (protoData.hasContainerPath()) { - data.setPath(protoData.getContainerPath()); + data.setContainerPath(protoData.getContainerPath()); } + + if (protoData.hasDbPath()) { + data.setDBPath(protoData.getDbPath()); + } + return data; } @@ -77,9 +83,15 @@ public class ContainerData { ContainerProtos.ContainerData.Builder builder = ContainerProtos .ContainerData.newBuilder(); builder.setName(this.getContainerName()); - if (this.getPath() != null) { - builder.setContainerPath(this.getPath()); + + if (this.getDBPath() != null) { + builder.setDbPath(this.getDBPath()); } + + if (this.getContainerPath() != null) { + builder.setContainerPath(this.getContainerPath()); + } + for (Map.Entry entry : metadata.entrySet()) { ContainerProtos.KeyValue.Builder keyValBuilder = ContainerProtos.KeyValue.newBuilder(); @@ -144,8 +156,8 @@ public class ContainerData { * * @return - path */ - public String getPath() { - return path; + public String getDBPath() { + return dbPath; } /** @@ -153,8 +165,8 @@ public class ContainerData { * * @param path - String. */ - public void setPath(String path) { - this.path = path; + public void setDBPath(String path) { + this.dbPath = path; } /** @@ -167,4 +179,21 @@ public class ContainerData { public String getName() { return getContainerName(); } + + /** + * Get container file path. + * @return - Physical path where container file and checksum is stored. + */ + public String getContainerPath() { + return containerFilePath; + } + + /** + * Set container Path. + * @param containerFilePath - File path. + */ + public void setContainerPath(String containerFilePath) { + this.containerFilePath = containerFilePath; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index 23e1804a9c9..79e9aeb6cd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -19,7 +19,23 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl; +import org.apache.hadoop.ozone.container.common.utils.LevelDBStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.apache.commons.io.FilenameUtils.removeExtension; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META; /** * A set of helper functions to create proper responses. @@ -104,6 +120,134 @@ public final class ContainerUtils { "Server does not support this command yet.").build(); } + /** + * get containerName from a container file. + * + * @param containerFile - File + * @return Name of the container. 
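The dbPath and containerPath accessors above round-trip through the protobuf form. A hedged sketch of that round trip; the one-argument ContainerData constructor is assumed and the paths are hypothetical:

    // Round trip sketch: ContainerData -> proto -> ContainerData.
    ContainerData data = new ContainerData("container-0001");
    data.setDBPath("/vol1/containers/container-0001/metadata/container.db");
    data.setContainerPath("/meta/repository/container-0001.container");

    ContainerProtos.ContainerData proto = data.getProtoBufMessage();
    ContainerData copy = ContainerData.getFromProtBuf(proto);
    // copy.getDBPath() and copy.getContainerPath() now match data's values.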
+ */ + public static String getContainerNameFromFile(File containerFile) { + Preconditions.checkNotNull(containerFile); + return Paths.get(containerFile.getParent()).resolve( + removeExtension(containerFile.getName())).toString(); + + } + + /** + * Verifies that this is indeed a new container. + * + * @param containerFile - Container File to verify + * @param metadataFile - metadata File to verify + * @throws IOException + */ + public static void verifyIsNewContainer(File containerFile, File metadataFile) + throws IOException { + Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class); + if (containerFile.exists()) { + log.error("container already exists on disk. File: {}", + containerFile.toPath()); + throw new FileAlreadyExistsException("container already exists on " + + "disk."); + } + + if (metadataFile.exists()) { + log.error("metadata found on disk, but missing container. Refusing to" + + " write this container. File: {} ", metadataFile.toPath()); + throw new FileAlreadyExistsException(("metadata found on disk, but " + + "missing container. Refusing to write this container.")); + } + + File parentPath = new File(containerFile.getParent()); + + if (!parentPath.exists() && !parentPath.mkdirs()) { + log.error("Unable to create parent path. Path: {}", + parentPath.toString()); + throw new IOException("Unable to create container directory."); + } + + if (!containerFile.createNewFile()) { + log.error("creation of a new container file failed. File: {}", + containerFile.toPath()); + throw new IOException("creation of a new container file failed."); + } + + if (!metadataFile.createNewFile()) { + log.error("creation of the metadata file failed. File: {}", + metadataFile.toPath()); + throw new IOException("creation of a new metadata file failed."); + } + } + + /** + * creates a Metadata DB for the specified container. + * + * @param containerPath - Container Path. + * @throws IOException + */ + public static Path createMetadata(Path containerPath) throws IOException { + Preconditions.checkNotNull(containerPath); + containerPath = containerPath.resolve(OzoneConsts.CONTAINER_META_PATH); + if (!containerPath.toFile().mkdirs()) { + throw new IOException("Unable to create directory for metadata storage." + + " Path: " + containerPath); + } + containerPath = containerPath.resolve(OzoneConsts.CONTAINER_DB); + LevelDBStore store = new LevelDBStore(containerPath.toFile(), true); + + // we close since the SCM pre-creates containers. + // we will open and put Db handle into a cache when keys are being created + // in a container. + + store.close(); + return containerPath; + } + + /** + * remove Container if it is empty. + *
+ * There are three things we need to delete. + *
+ * 1. Container file and metadata file. 2. The Level DB file 3. The path that + * we created on the data location. + * + * @param containerData - Data of the container to remove. + * @throws IOException + */ + public static void removeContainer(ContainerData containerData) throws + IOException { + Preconditions.checkNotNull(containerData); + + // TODO : Check if there are any keys. This needs to be done + // by calling into key layer code, hence this is a TODO for now. + + Path dbPath = Paths.get(containerData.getDBPath()); + + // Delete the DB File. + FileUtils.forceDelete(dbPath.toFile()); + dbPath = dbPath.getParent(); + + // Delete all Metadata in the Data directories for this containers. + if (dbPath != null) { + FileUtils.deleteDirectory(dbPath.toFile()); + dbPath = dbPath.getParent(); + } + + // now delete the container directory, this means that all key data dirs + // will be removed too. + if (dbPath != null) { + FileUtils.deleteDirectory(dbPath.toFile()); + } + + // Delete the container metadata from the metadata locations. + String rootPath = getContainerNameFromFile(new File(containerData + .getContainerPath())); + Path containerPath = Paths.get(rootPath.concat(CONTAINER_EXTENSION)); + Path metaPath = Paths.get(rootPath.concat(CONTAINER_META)); + + FileUtils.forceDelete(containerPath.toFile()); + FileUtils.forceDelete(metaPath.toFile()); + } + private ContainerUtils() { //never constructed. } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java new file mode 100644 index 00000000000..f3676304512 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.impl; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.LinkedList; +import java.util.List; + +/** + * A class that tells the ContainerManager where to place the containers. + * Please note : There is *no* one-to-one correlation between metadata + * locations and data locations. 
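The placement note above is the key design point: the sketch below shows the independent round-robin cycling it implies, where metadata locations and data volumes advance on separate counters so the same container can land on different disks for each (class and field names are illustrative, not from the patch):

    import java.nio.file.Path;
    import java.util.List;

    class RoundRobin {
      private int index;
      private final List<Path> targets;

      RoundRobin(List<Path> targets) {
        this.targets = targets;
      }

      // Each call moves to the next location and wraps around at the end,
      // mirroring the currentIndex % size arithmetic used above.
      Path next() {
        return targets.get(index++ % targets.size());
      }
    }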
+ * + * For example : A user could map all container files to a + * SSD but leave data/metadata on bunch of other disks. + */ +public class ContainerLocationManagerImpl implements ContainerLocationManager { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerLocationManagerImpl.class); + + + private final Configuration conf; + private final FsDatasetSpi dataset; + private final Path[] volumePaths; + private int currentIndex; + private final List locations; + + + /** + * Constructs a Location Manager. + * @param conf - Configuration. + */ + public ContainerLocationManagerImpl(Configuration conf, List locations, + FsDatasetSpi dataset) throws IOException { + this.conf = conf; + this.dataset = dataset; + List pathList = new LinkedList<>(); + FsDatasetSpi.FsVolumeReferences references; + try { + synchronized (this.dataset) { + references = this.dataset.getFsVolumeReferences(); + for (int ndx = 0; ndx < references.size(); ndx++) { + FsVolumeSpi vol = references.get(ndx); + pathList.add(Paths.get(vol.getBasePath())); + } + references.close(); + volumePaths = pathList.toArray(new Path[pathList.size()]); + this.locations = locations; + } + } catch (IOException ex) { + LOG.error("Unable to get volume paths.", ex); + throw new IOException("Internal error", ex); + } + + } + + /** + * Returns the path where the container should be placed from a set of + * locations. + * + * @return A path where we should place this container and metadata. + * @throws IOException + */ + @Override + public Path getContainerPath() + throws IOException { + Preconditions.checkState(locations.size() > 0); + int index = currentIndex % locations.size(); + return locations.get(index).resolve(OzoneConsts.CONTAINER_ROOT_PREFIX); + } + + /** + * Returns the path where the container Data file are stored. + * + * @return a path where we place the LevelDB and data files of a container. + * @throws IOException + */ + @Override + public Path getDataPath(String containerName) throws IOException { + Path currentPath = volumePaths[currentIndex++ % volumePaths.length]; + currentPath = currentPath.resolve(OzoneConsts.CONTAINER_PREFIX); + return currentPath.resolve(containerName); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java new file mode 100644 index 00000000000..599b2f28ead --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java @@ -0,0 +1,514 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.container.common.impl; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ozone.container.common.helpers.ContainerData; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.helpers.Pipeline; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.DigestInputStream; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META; + +/** + * A Generic ContainerManagerImpl that will be called from Ozone + * ContainerManagerImpl. This allows us to support delta changes to ozone + * version without having to rewrite the containerManager. + */ +public class ContainerManagerImpl implements ContainerManager { + static final Logger LOG = + LoggerFactory.getLogger(ContainerManagerImpl.class); + + private final ConcurrentSkipListMap + containerMap = new ConcurrentSkipListMap<>(); + + // This lock follows fair locking policy of first-come first-serve + // for waiting threads. + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); + private ContainerLocationManager locationManager; + + /** + * Init call that sets up a container Manager. + * + * @param config - Configuration. + * @param containerDirs - List of Metadata Container locations. + * @throws IOException + */ + @Override + public void init(Configuration config, List containerDirs, + FsDatasetSpi dataset) + throws IOException { + + Preconditions.checkNotNull(config); + Preconditions.checkNotNull(containerDirs); + Preconditions.checkState(containerDirs.size() > 0); + + readLock(); + try { + for (Path path : containerDirs) { + File directory = path.toFile(); + if (!directory.isDirectory()) { + LOG.error("Invalid path to container metadata directory. path: {}", + path.toString()); + throw new IOException("Invalid path to container metadata directory" + + ". 
" + path); + } + File[] files = directory.listFiles(new ContainerFilter()); + if (files != null) { + for (File containerFile : files) { + String containerPath = + ContainerUtils.getContainerNameFromFile(containerFile); + Preconditions.checkNotNull(containerPath); + readContainerInfo(containerPath); + } + } + } + this.locationManager = new ContainerLocationManagerImpl(config, + containerDirs, dataset); + + } finally { + readUnlock(); + } + } + + /** + * Reads the Container Info from a file and verifies that checksum match. If + * the checksums match, then that file is added to containerMap. + * + * @param containerName - Name which points to the persisted container. + */ + private void readContainerInfo(String containerName) + throws IOException { + Preconditions.checkState(containerName.length() > 0); + FileInputStream containerStream = null; + DigestInputStream dis = null; + FileInputStream metaStream = null; + Path cPath = Paths.get(containerName).getFileName(); + String keyName = null; + if (cPath != null) { + keyName = cPath.toString(); + } + Preconditions.checkNotNull(keyName); + + try { + String containerFileName = containerName.concat(CONTAINER_EXTENSION); + String metaFileName = containerName.concat(CONTAINER_META); + + containerStream = new FileInputStream(containerFileName); + + metaStream = new FileInputStream(metaFileName); + + MessageDigest sha = MessageDigest.getInstance("SHA-256"); + + dis = new DigestInputStream(containerStream, sha); + + ContainerData containerData = ContainerData.getFromProtBuf( + ContainerProtos.ContainerData.parseDelimitedFrom(dis)); + + + ContainerProtos.ContainerMeta meta = ContainerProtos.ContainerMeta + .parseDelimitedFrom(metaStream); + + if (meta != null && !DigestUtils.sha256Hex(sha.digest()).equals(meta + .getHash())) { + throw new IOException("Invalid SHA found for file."); + } + + containerMap.put(keyName, new ContainerStatus(containerData, true)); + + } catch (IOException | NoSuchAlgorithmException ex) { + LOG.error("read failed for file: {} ex: {}", + containerName, ex.getMessage()); + + // TODO : Add this file to a recovery Queue. + + // Remember that this container is busted and we cannot use it. + containerMap.put(keyName, new ContainerStatus(null, false)); + } finally { + IOUtils.closeStream(dis); + IOUtils.closeStream(containerStream); + IOUtils.closeStream(metaStream); + } + } + + /** + * Creates a container with the given name. + * + * @param pipeline -- Nodes which make up this container. + * @param containerData - Container Name and metadata. + * @throws IOException + */ + @Override + public void createContainer(Pipeline pipeline, ContainerData containerData) + throws IOException { + Preconditions.checkNotNull(containerData); + + writeLock(); + try { + if (containerMap.containsKey(containerData.getName())) { + throw new FileAlreadyExistsException("container already exists."); + } + + // This is by design. We first write and close the + // container Info and metadata to a directory. + // Then read back and put that info into the containerMap. + // This allows us to make sure that our write is consistent. + + writeContainerInfo(containerData); + File cFile = new File(containerData.getContainerPath()); + readContainerInfo(ContainerUtils.getContainerNameFromFile(cFile)); + } catch (NoSuchAlgorithmException ex) { + throw new IOException("failed to create container", ex); + + } finally { + writeUnlock(); + } + + } + + /** + * Writes a container to a chosen location and updates the container Map. 
+ * + * The file formats of ContainerData and Container Meta are the following. + * + * message ContainerData { + * required string name = 1; + * repeated KeyValue metadata = 2; + * optional string dbPath = 3; + * optional string containerPath = 4; + * } + * + * message ContainerMeta { + * required string fileName = 1; + * required string hash = 2; + * } + * + * @param containerData - container Data + */ + private void writeContainerInfo(ContainerData containerData) + throws IOException, NoSuchAlgorithmException { + + Preconditions.checkNotNull(this.locationManager); + + FileOutputStream containerStream = null; + DigestOutputStream dos = null; + FileOutputStream metaStream = null; + Path location = locationManager.getContainerPath(); + + File containerFile = location.resolve(containerData + .getContainerName().concat(CONTAINER_EXTENSION)) + .toFile(); + + File metadataFile = location.resolve(containerData + .getContainerName().concat(CONTAINER_META)) + .toFile(); + + try { + ContainerUtils.verifyIsNewContainer(containerFile, metadataFile); + + Path metadataPath = this.locationManager.getDataPath( + containerData.getContainerName()); + metadataPath = ContainerUtils.createMetadata(metadataPath); + + containerStream = new FileOutputStream(containerFile); + metaStream = new FileOutputStream(metadataFile); + MessageDigest sha = MessageDigest.getInstance("SHA-256"); + + dos = new DigestOutputStream(containerStream, sha); + containerData.setDBPath(metadataPath.toString()); + containerData.setContainerPath(containerFile.toString()); + + ContainerProtos.ContainerData protoData = containerData + .getProtoBufMessage(); + protoData.writeDelimitedTo(dos); + + ContainerProtos.ContainerMeta protoMeta = ContainerProtos + .ContainerMeta.newBuilder() + .setFileName(containerFile.toString()) + .setHash(DigestUtils.sha256Hex(sha.digest())) + .build(); + protoMeta.writeDelimitedTo(metaStream); + + } catch (IOException ex) { + + // TODO : we need to clean up partially constructed files + // The proper way to do this would be for a thread + // to read all these 3 artifacts and make sure they are + // sane. That info needs to come from the replication + // pipeline, and if not consistent delete these files. + + // In case of ozone this is *not* a deal breaker since + // SCM is guaranteed to generate unique container names. + + LOG.error("creation of container failed. Name: {}", + containerData.getContainerName()); + throw ex; + } finally { + IOUtils.closeStream(dos); + IOUtils.closeStream(containerStream); + IOUtils.closeStream(metaStream); + } + } + + /** + * Deletes an existing container. + * + * @param pipeline - nodes that make this container. + * @param containerName - name of the container. + * @throws IOException + */ + @Override + public void deleteContainer(Pipeline pipeline, String containerName) throws + IOException { + Preconditions.checkState(containerName.length() > 0); + writeLock(); + try { + ContainerStatus status = containerMap.get(containerName); + if (status == null) { + LOG.info("No such container. Name: {}", containerName); + throw new IOException("No such container. Name : " + containerName); + } + ContainerUtils.removeContainer(status.containerData); + containerMap.remove(containerName); + } finally { + writeUnlock(); + } + + } + + /** + * A simple interface for container iterations. + *
+ * This call makes no guarantees about consistency of the data between + * different list calls. It just returns the best known data at that point of + * time. It is possible that using this iteration you can miss certain + * containers from the listing. + * + * @param prevKey - Previous Key Value or empty String. + * @param count - how many to return + * @param data - Actual containerData + * @throws IOException + */ + @Override + public void listContainer(String prevKey, long count, + List<ContainerData> data) throws IOException { + Preconditions.checkNotNull(data); + readLock(); + try { + ConcurrentNavigableMap<String, ContainerStatus> map = null; + if (prevKey.length() == 0) { + map = containerMap.tailMap(containerMap.firstKey(), true); + } else { + map = containerMap.tailMap(prevKey, false); + } + + int currentCount = 0; + for (ContainerStatus entry : map.values()) { + if (currentCount < count) { + data.add(entry.getContainer()); + currentCount++; + } else { + return; + } + } + } finally { + readUnlock(); + } + } + + /** + * Get metadata about a specific container. + * + * @param containerName - Name of the container + * @return ContainerData - Container Data. + * @throws IOException + */ + @Override + public ContainerData readContainer(String containerName) throws IOException { + return containerMap.get(containerName).getContainer(); + } + + /** + * Supports clean shutdown of container. + * + * @throws IOException + */ + @Override + public void shutdown() throws IOException { + + } + + + @VisibleForTesting + ConcurrentSkipListMap<String, ContainerStatus> getContainerMap() { + return containerMap; + } + + /** + * Acquire read lock. + */ + @Override + public void readLock() { + this.lock.readLock().lock(); + + } + + /** + * Release read lock. + */ + @Override + public void readUnlock() { + this.lock.readLock().unlock(); + } + + /** + * Check if the current thread holds read lock. + */ + @Override + public boolean hasReadLock() { + // tryLock() would acquire and leak a read lock hold; query the current + // thread's hold count instead. + return this.lock.getReadHoldCount() > 0; + } + + /** + * Acquire write lock. + */ + @Override + public void writeLock() { + this.lock.writeLock().lock(); + } + + /** + * Acquire write lock, unless interrupted while waiting. + */ + @Override + public void writeLockInterruptibly() throws InterruptedException { + this.lock.writeLock().lockInterruptibly(); + + } + + /** + * Release write lock. + */ + @Override + public void writeUnlock() { + this.lock.writeLock().unlock(); + + } + + /** + * Check if the current thread holds write lock. + */ + @Override + public boolean hasWriteLock() { + return this.lock.writeLock().isHeldByCurrentThread(); + } + + /** + * Filter out only container files from the container metadata dir. + */ + private static class ContainerFilter implements FilenameFilter { + /** + * Tests if a specified file should be included in a file list. + * + * @param dir the directory in which the file was found. + * @param name the name of the file. + * @return true if and only if the name should be included in + * the file list; false otherwise. + */ + @Override + public boolean accept(File dir, String name) { + return name.endsWith(CONTAINER_EXTENSION); + } + } + + /** + * This is an immutable class that represents the state of a container. If + * reading the container encountered an error when we booted up, we will post + * that info to a recovery queue and keep the info in the containerMap. + * + * If and when the issue is fixed, the expectation is that this entry will be + * deleted by the recovery thread from the containerMap and a new entry will + * be inserted instead of modifying this class. + */ + @VisibleForTesting + static class ContainerStatus { + private final ContainerData containerData; + private final boolean active; + + /** + * Creates a Container Status class. + * + * @param containerData - ContainerData. + * @param active - Active or not active. + */ + public ContainerStatus(ContainerData containerData, boolean active) { + this.containerData = containerData; + this.active = active; + } + + /** + * Returns container if it is active. It is not active if we have had an + * error and we are waiting for the background threads to fix the issue. + * + * @return ContainerData. + */ + public ContainerData getContainer() { + if (active) { + return containerData; + } + return null; + } + + /** + * Indicates if a container is Active. + * + * @return + */ + public boolean isActive() { + return active; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java new file mode 100644 index 00000000000..00e7223d212 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.common.interfaces; + +import java.io.IOException; +import java.nio.file.Path; + +/** + * Returns physical path locations where the containers will be created. + */ +public interface ContainerLocationManager { + /** + * Returns the path where the container should be placed from a set of + * locations. + * + * @return A path where we should place this container and metadata. + * @throws IOException + */ + Path getContainerPath() throws IOException; + + /** + * Returns the path where the container data files are stored. + * + * @return a path where we place the LevelDB and data files of a container. 
+ * @throws IOException + */ + Path getDataPath(String containerName) throws IOException; + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java index 780d932fb6c..a0ddcd9c250 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java @@ -20,18 +20,35 @@ package org.apache.hadoop.ozone.container.common.interfaces; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.util.RwLock; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.Pipeline; import java.io.IOException; +import java.nio.file.Path; import java.util.List; + /** * Interface for container operations. */ @InterfaceAudience.Private @InterfaceStability.Unstable -public interface ContainerManager { +public interface ContainerManager extends RwLock { + + /** + * Init call that sets up a container Manager. + * + * @param config - Configuration. + * @param containerDirs - List of Metadata Container locations. + * @param dataset - FSDataset. + * @throws IOException + */ + void init(Configuration config, List containerDirs, + FsDatasetSpi dataset) + throws IOException; /** * Creates a container with the given name. @@ -56,20 +73,28 @@ public interface ContainerManager { /** * As simple interface for container Iterations. * - * @param start - Starting index + * @param prevKey - Starting KeyValue * @param count - how many to return * @param data - Actual containerData * @throws IOException */ - void listContainer(long start, long count, List data) + void listContainer(String prevKey, long count, List data) throws IOException; /** * Get metadata about a specific container. * * @param containerName - Name of the container - * @return ContainerData + * @return ContainerData - Container Data. * @throws IOException */ ContainerData readContainer(String containerName) throws IOException; + + /** + * Supports clean shutdown of container. 
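The prevKey/count contract of listContainer above pages like a cursor: pass an empty string to start, then the last name returned to continue. A hedged caller-side sketch; manager is any ContainerManager and the page size is arbitrary:

    import java.io.IOException;
    import java.util.LinkedList;
    import java.util.List;

    class ListingSketch {
      static void listAll(ContainerManager manager) throws IOException {
        String prevKey = "";
        while (true) {
          List<ContainerData> page = new LinkedList<>();
          manager.listContainer(prevKey, 50, page);
          if (page.isEmpty()) {
            break;                      // no more containers
          }
          for (ContainerData cd : page) {
            prevKey = cd.getName();     // resume strictly after this name
          }
        }
      }
    }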
+ * + * @throws IOException + */ + void shutdown() throws IOException; + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java index 05cd44a3a8d..3b9ba8da72c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java @@ -26,10 +26,10 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.helpers.Pipeline; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +42,7 @@ import java.io.IOException; public class XceiverClient { static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class); private final Pipeline pipeline; - private final OzoneConfiguration config; + private final Configuration config; private ChannelFuture channelFuture; private Bootstrap b; private EventLoopGroup group; @@ -53,7 +53,7 @@ public class XceiverClient { * @param pipeline - Pipeline that defines the machines. * @param config -- Ozone Config */ - public XceiverClient(Pipeline pipeline, OzoneConfiguration config) { + public XceiverClient(Pipeline pipeline, Configuration config) { Preconditions.checkNotNull(pipeline); Preconditions.checkNotNull(config); this.pipeline = pipeline; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java index 77e4af17f9f..628b7412191 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java @@ -26,8 +26,8 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; /** @@ -47,7 +47,7 @@ public final class XceiverServer { * * @param conf - Configuration */ - public XceiverServer(OzoneConfiguration conf, + public XceiverServer(Configuration conf, ContainerDispatcher dispatcher) { Preconditions.checkNotNull(conf); this.port = conf.getInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneLevelDBStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/LevelDBStore.java similarity index 67% rename from 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneLevelDBStore.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/LevelDBStore.java index a0bff682a83..2a2c5cca732 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneLevelDBStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/utils/LevelDBStore.java @@ -16,43 +16,45 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.web.localstorage; +package org.apache.hadoop.ozone.container.common.utils; import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; +import org.iq80.leveldb.DBIterator; import org.iq80.leveldb.Options; import java.io.File; import java.io.IOException; /** - * OzoneLevelDBStore is used by the local - * OzoneStore which is used in testing. + * LevelDB interface. */ -class OzoneLevelDBStore { +public class LevelDBStore { private DB db; + private final File dbFile; /** * Opens a DB file. * - * @param dbPath - DB File path + * @param dbPath - DB File path * @param createIfMissing - Create if missing - * * @throws IOException */ - OzoneLevelDBStore(File dbPath, boolean createIfMissing) throws IOException { + public LevelDBStore(File dbPath, boolean createIfMissing) throws + IOException { Options options = new Options(); options.createIfMissing(createIfMissing); db = JniDBFactory.factory.open(dbPath, options); if (db == null) { throw new IOException("Db is null"); } + this.dbFile = dbPath; } /** * Puts a Key into file. * - * @param key - key + * @param key - key * @param value - value */ public void put(byte[] key, byte[] value) { @@ -63,7 +65,6 @@ * Get Key. * * @param key key - * * @return value */ public byte[] get(byte[] key) { @@ -87,4 +88,37 @@ public void close() throws IOException { db.close(); } + + /** + * Returns true if the DB is empty. + * + * @return boolean + * @throws IOException + */ + public boolean isEmpty() throws IOException { + DBIterator iter = db.iterator(); + try { + iter.seekToFirst(); + // no first entry means the DB is empty + return !iter.hasNext(); + } finally { + iter.close(); + } + } + + /** + * Returns Java File Object that points to the DB. + * @return File + */ + public File getDbFile() { + return dbFile; + } + + /** + * Returns the actual levelDB object. + * @return DB handle. + */ + public DB getDB() { + return db; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java new file mode 100644 index 00000000000..cc85a240fd5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.container.ozoneimpl; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl; +import org.apache.hadoop.ozone.container.common.impl.Dispatcher; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; +import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.LinkedList; +import java.util.List; +import java.nio.file.Path; + +/** + * Ozone main class sets up the network server and initializes the container + * layer. + */ +public class OzoneContainer { + private static final Logger LOG = + LoggerFactory.getLogger(OzoneContainer.class); + + private final Configuration ozoneConfig; + private final FsDatasetSpi dataSet; + private final ContainerDispatcher dispatcher; + private final ContainerManager manager; + private final XceiverServer server; + + /** + * Creates a network endpoint and enables Ozone container. + * + * @param ozoneConfig - Config + * @param dataSet - FsDataset. + * @throws IOException + */ + public OzoneContainer(Configuration ozoneConfig, FsDatasetSpi dataSet) throws + Exception { + List locations = new LinkedList<>(); + String[] paths = ozoneConfig.getStrings(OzoneConfigKeys + .DFS_OZONE_METADATA_DIRS); + if (paths != null && paths.length > 0) { + for (String p : paths) { + locations.add(Paths.get(p)); + } + } else { + getDataDir(dataSet, locations); + } + + this.ozoneConfig = ozoneConfig; + this.dataSet = dataSet; + + manager = new ContainerManagerImpl(); + manager.init(this.ozoneConfig, locations, this.dataSet); + + this.dispatcher = new Dispatcher(manager); + server = new XceiverServer(this.ozoneConfig, this.dispatcher); + } + + /** + * Starts serving requests to ozone container. + * @throws Exception + */ + public void start() throws Exception { + server.start(); + } + + /** + * Stops the ozone container. + * @throws Exception + */ + public void stop() throws Exception { + server.stop(); + } + + /** + * Returns a paths to data dirs. + * @param dataset - FSDataset. + * @param pathList - List of paths. 
+ * @throws IOException + */ + private void getDataDir(FsDatasetSpi dataset, List pathList) throws + IOException { + FsDatasetSpi.FsVolumeReferences references; + try { + synchronized (dataset) { + references = dataset.getFsVolumeReferences(); + for (int ndx = 0; ndx < references.size(); ndx++) { + FsVolumeSpi vol = references.get(ndx); + pathList.add(Paths.get(vol.getBasePath())); + } + references.close(); + } + } catch (IOException ex) { + LOG.error("Unable to get volume paths.", ex); + throw new IOException("Internal error", ex); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java new file mode 100644 index 00000000000..c99c038b244 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.container.ozoneimpl; +/** + Ozone main that calls into the container layer +**/ \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java index 248abab79ca..4df303a1688 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.web.request.OzoneAcl; import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.KeyInfo; import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.http.HttpEntity; import org.apache.http.HttpRequest; import org.apache.http.HttpRequestInterceptor; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java index 0de1eb2e45d..fd96c5ad864 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.ListBuckets; import org.apache.hadoop.ozone.web.response.VolumeInfo; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java index d62c72ddcdd..7d479488568 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.web.handlers; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ozone.web.request.OzoneAcl; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import java.util.LinkedList; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java index da093532555..c7d3eed7b42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.web.exceptions.OzoneException; import org.apache.hadoop.ozone.web.headers.Header; import org.apache.hadoop.ozone.web.interfaces.Bucket; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import 
org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.slf4j.MDC; @@ -37,7 +37,7 @@ import java.io.IOException; import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_FUNCTION; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java index 2639e23e097..6205794f2ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.interfaces.UserAuth; import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.ListKeys; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.slf4j.Logger; @@ -47,10 +47,11 @@ import java.util.LinkedList; import java.util.List; import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_COMPONENT; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_RESOURCE; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_REQUEST; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_USER; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER; + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java index 88e90527366..31997587dd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java @@ -44,10 +44,11 @@ import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_BUCKET_N import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_REQUEST; import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.SERVER_ERROR; import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.newError; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_COMPONENT; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_REQUEST; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_RESOURCE; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_USER; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER; + /** * This class abstracts way the repetitive tasks in Key handling code. 
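Stepping back from the import renames, a hedged sketch of how the pieces added by this patch fit together at runtime, mirroring what DataNode.startDataNode and the OzoneContainer constructor above do. The FsDatasetSpi instance is assumed to come from the DataNode, and the metadata dir value is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
    import org.apache.hadoop.ozone.OzoneConfigKeys;
    import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;

    class OzoneStartupSketch {
      static void run(Configuration conf, FsDatasetSpi<?> dataset)
          throws Exception {
        conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
        conf.set(OzoneConfigKeys.DFS_OZONE_METADATA_DIRS, "/data/ozone/meta");

        // ContainerManagerImpl, Dispatcher and XceiverServer are created
        // and wired inside the OzoneContainer constructor.
        OzoneContainer container = new OzoneContainer(conf, dataset);
        container.start();
        // ... serve container RPCs over the XceiverServer port ...
        container.stop();
      }
    }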
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java index 2ce39ba5200..44f4515af42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java @@ -38,7 +38,7 @@ import java.io.IOException; import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_FUNCTION; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION; /** * VolumeHandler handles volume specific HTTP calls. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java index 7ca5d475c64..db19908277a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java @@ -41,10 +41,10 @@ import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_COMPONENT; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_RESOURCE; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_REQUEST; -import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_USER; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java index 8ab679aa158..6e3e88ebe57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java @@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory; import org.slf4j.Logger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.utils.LevelDBStore; import org.apache.hadoop.ozone.web.exceptions.ErrorTable; import org.apache.hadoop.ozone.web.exceptions.OzoneException; import org.apache.hadoop.ozone.web.handlers.BucketArgs; @@ -37,7 +38,7 @@ import org.apache.hadoop.ozone.web.response.ListBuckets; import org.apache.hadoop.ozone.web.response.ListVolumes; import org.apache.hadoop.ozone.web.response.VolumeInfo; import org.apache.hadoop.ozone.web.response.VolumeOwner; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import org.iq80.leveldb.DBException; import org.apache.commons.codec.digest.DigestUtils; @@ -125,8 +126,8 @@ public final class OzoneMetadataManager { private static final String USER_DB = "/user.db"; private static 
final String META_DB = "/metadata.db"; private static OzoneMetadataManager bm = null; - private OzoneLevelDBStore userDB; - private OzoneLevelDBStore metadataDB; + private LevelDBStore userDB; + private LevelDBStore metadataDB; private ReadWriteLock lock; private Charset encoding = Charset.forName("UTF-8"); private String storageRoot; @@ -154,8 +155,8 @@ public final class OzoneMetadataManager { } try { - userDB = new OzoneLevelDBStore(new File(storageRoot + USER_DB), true); - metadataDB = new OzoneLevelDBStore(new File(storageRoot + META_DB), true); + userDB = new LevelDBStore(new File(storageRoot + USER_DB), true); + metadataDB = new LevelDBStore(new File(storageRoot + META_DB), true); inProgressObjects = new ConcurrentHashMap<>(); } catch (IOException ex) { LOG.error("Cannot open db :" + ex.getMessage()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/OzoneAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/OzoneAcl.java index cdeb6c60ec8..92c7f99116d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/OzoneAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/request/OzoneAcl.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.web.request; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import java.util.Objects; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java index 857cc61eb65..85350470687 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web.response; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ozone.web.request.OzoneAcl; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import org.codehaus.jackson.annotate.JsonAutoDetect; import org.codehaus.jackson.annotate.JsonMethod; import org.codehaus.jackson.map.ObjectMapper; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java index 85b67fbae2e..609a47b4908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/userauth/Simple.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.web.exceptions.OzoneException; import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.headers.Header; import org.apache.hadoop.ozone.web.interfaces.UserAuth; -import org.apache.hadoop.ozone.web.utils.OzoneConsts; +import org.apache.hadoop.ozone.OzoneConsts; import javax.ws.rs.core.HttpHeaders; import java.util.List; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index 8036770416d..d94e6a1d4c3 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.web.utils; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream; import org.apache.hadoop.ozone.web.exceptions.ErrorTable; import org.apache.hadoop.ozone.web.exceptions.OzoneException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto index 099f93fa5bb..38a378fb0cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto @@ -168,7 +168,13 @@ message KeyValue { message ContainerData { required string name = 1; repeated KeyValue metadata = 2; - optional string containerPath = 3; + optional string dbPath = 3; + optional string containerPath = 4; +} + +message ContainerMeta { + required string fileName = 1; + required string hash = 2; } // Container Messages. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java new file mode 100644 index 00000000000..3b498e276a2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.container.ContainerTestHelper
+    .createSingleNodePipeline;
+import static org.junit.Assert.fail;
+
+/**
+ * Simple tests to verify that container persistence works as expected.
+ */
+public class TestContainerPersistence {
+
+  static String path;
+  static ContainerManagerImpl containerManager;
+  static OzoneConfiguration conf;
+  static FsDatasetSpi fsDataSet;
+  static MiniDFSCluster cluster;
+  static List<Path> pathLists = new LinkedList<>();
+
+  @BeforeClass
+  public static void init() throws IOException {
+    conf = new OzoneConfiguration();
+    URL p = conf.getClass().getResource("");
+    path = p.getPath().concat(
+        TestContainerPersistence.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+
+    File containerDir = new File(path);
+    if (containerDir.exists()) {
+      FileUtils.deleteDirectory(new File(path));
+    }
+
+    Assert.assertTrue(containerDir.mkdirs());
+
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    fsDataSet = cluster.getDataNodes().get(0).getFSDataset();
+    containerManager = new ContainerManagerImpl();
+  }
+
+  @AfterClass
+  public static void shutdown() throws IOException {
+    cluster.shutdown();
+    FileUtils.deleteDirectory(new File(path));
+  }
+
+  @Before
+  public void setupPaths() throws IOException {
+    if (!new File(path).exists()) {
+      new File(path).mkdirs();
+    }
+    pathLists.clear();
+    containerManager.getContainerMap().clear();
+    pathLists.add(Paths.get(path));
+    containerManager.init(conf, pathLists, fsDataSet);
+  }
+
+  @After
+  public void cleanupDir() throws IOException {
+    FileUtils.deleteDirectory(new File(path));
+  }
+
+  @Test
+  public void testCreateContainer() throws Exception {
+
+    String containerName = OzoneUtils.getRequestID();
+    ContainerData data = new ContainerData(containerName);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(createSingleNodePipeline(), data);
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName));
+    ContainerManagerImpl.ContainerStatus status = containerManager
+        .getContainerMap().get(containerName);
+
+    Assert.assertTrue(status.isActive());
+    Assert.assertNotNull(status.getContainer().getContainerPath());
+    Assert.assertNotNull(status.getContainer().getDBPath());
+
+    Assert.assertTrue(new File(status.getContainer().getContainerPath())
+        .exists());
+
+    String containerPathString = ContainerUtils.getContainerNameFromFile(new
+        File(status.getContainer().getContainerPath()));
+
+    Path meta = Paths.get(containerPathString);
+
+    String metadataFile = meta.toString() + OzoneConsts.CONTAINER_META;
+    Assert.assertTrue(new File(metadataFile).exists());
+
+    String dbPath = status.getContainer().getDBPath();
+
+    LevelDBStore store = null;
+    try {
+      store = new LevelDBStore(new File(dbPath), false);
+      Assert.assertNotNull(store.getDB());
+    } finally {
+      if (store != null) {
+        store.close();
+      }
+    }
+  }
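+
+  // For reference: once a container's dbPath is known, the persisted
+  // key/value state can also be inspected directly through the stock iq80
+  // LevelDB API. A minimal sketch (it assumes org.iq80.leveldb.DBIterator
+  // is on the classpath; none of this is exercised by these tests):
+  //
+  //   LevelDBStore store = new LevelDBStore(new File(dbPath), false);
+  //   DBIterator itr = store.getDB().iterator();
+  //   itr.seekToFirst();
+  //   while (itr.hasNext()) {
+  //     Map.Entry<byte[], byte[]> entry = itr.next();
+  //     // each entry is one key/value pair persisted for this container
+  //   }
+  //   store.close();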
+
+  @Test
+  public void testCreateDuplicateContainer() throws Exception {
+    String containerName = OzoneUtils.getRequestID();
+
+    ContainerData data = new ContainerData(containerName);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(createSingleNodePipeline(), data);
+    try {
+      containerManager.createContainer(createSingleNodePipeline(), data);
+      fail("Expected Exception not thrown.");
+    } catch (IOException ex) {
+      Assert.assertNotNull(ex);
+    }
+  }
+
+  @Test
+  public void testDeleteContainer() throws Exception {
+    String containerName1 = OzoneUtils.getRequestID();
+    String containerName2 = OzoneUtils.getRequestID();
+
+    ContainerData data = new ContainerData(containerName1);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(createSingleNodePipeline(), data);
+
+    data = new ContainerData(containerName2);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(createSingleNodePipeline(), data);
+
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName1));
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName2));
+
+    containerManager.deleteContainer(createSingleNodePipeline(),
+        containerName1);
+    Assert.assertFalse(containerManager.getContainerMap()
+        .containsKey(containerName1));
+
+    // Let us make sure that we are able to re-use a container name after
+    // delete.
+    data = new ContainerData(containerName1);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(createSingleNodePipeline(), data);
+
+    // Assert we still have both containers.
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName1));
+    Assert.assertTrue(containerManager.getContainerMap()
+        .containsKey(containerName2));
+  }
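+
+  // listContainer pages through containers in sorted name order: the caller
+  // passes the last name seen as prevKey and receives up to 'count' entries
+  // that sort after it, as testListContainer below exercises. A sketch of
+  // draining a listing of unknown size with that contract (variable names
+  // are illustrative, and it assumes an exhausted listing yields an empty
+  // page):
+  //
+  //   List<ContainerData> page = new LinkedList<>();
+  //   String prev = "";
+  //   for (;;) {
+  //     page.clear();
+  //     containerManager.listContainer(prev, 5, page);
+  //     if (page.isEmpty()) {
+  //       break;
+  //     }
+  //     // consume the page here
+  //     prev = page.get(page.size() - 1).getContainerName();
+  //   }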
+
+  /**
+   * This test creates 1000 containers and reads them back 5 containers at a
+   * time and verifies that we did get back all containers.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testListContainer() throws IOException {
+    final int count = 1000;
+    final int step = 5;
+
+    Map<String, ContainerData> testMap = new HashMap<>();
+    for (int x = 0; x < count; x++) {
+      String containerName = OzoneUtils.getRequestID();
+
+      ContainerData data = new ContainerData(containerName);
+      data.addMetadata("VOLUME", "shire");
+      data.addMetadata("owner", "bilbo");
+      containerManager.createContainer(createSingleNodePipeline(), data);
+      testMap.put(containerName, data);
+    }
+
+    int counter = 0;
+    String prevKey = "";
+    List<ContainerData> results = new LinkedList<>();
+    while (counter < count) {
+      containerManager.listContainer(prevKey, step, results);
+      for (int y = 0; y < results.size(); y++) {
+        testMap.remove(results.get(y).getContainerName());
+      }
+      counter += step;
+      String nextKey = results.get(results.size() - 1).getContainerName();
+
+      // Assert that listContainer is returning results in sorted order.
+      Assert.assertTrue(prevKey.compareTo(nextKey) < 0);
+      prevKey = nextKey;
+      results.clear();
+    }
+    // Assert that we listed all the keys that we had put into the
+    // container map.
+    Assert.assertTrue(testMap.isEmpty());
+  }
+}
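The DatanodeContainerProtocol.proto change earlier in this patch surfaces in the generated ContainerProtos classes used by the next test file: ContainerData now carries a dbPath (field 3) next to containerPath (renumbered to 4), and the new ContainerMeta message pairs a container file name with its hash. A minimal sketch of populating the new fields through the generated protobuf builders; the setter names follow mechanically from the .proto fields, while the literal paths and hash value are purely illustrative:

    ContainerProtos.ContainerData containerData =
        ContainerProtos.ContainerData.newBuilder()
            .setName("container-0001")
            .setDbPath("/data/containers/container-0001/container.db")
            .setContainerPath("/data/containers/container-0001/container.meta")
            .build();

    ContainerProtos.ContainerMeta containerMeta =
        ContainerProtos.ContainerMeta.newBuilder()
            .setFileName("/data/containers/container-0001/container.meta")
            .setHash("3a7bd3e2360a...")  // hash of the container file; truncated example
            .build();

Note that renumbering an existing optional field, as containerPath moves from 3 to 4 here, is only safe while no serialized ContainerData messages written by older builds need to be read back.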
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
new file mode 100644
index 00000000000..5beb8b907cb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
+import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+
+/**
+ * Tests ozone containers.
+ */
+public class TestOzoneContainer {
+  @Test
+  public void testCreateOzoneContainer() throws Exception {
+
+    Configuration conf = new OzoneConfiguration();
+    URL p = conf.getClass().getResource("");
+    String path = p.getPath().concat(
+        TestOzoneContainer.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+
+    // We don't start the Ozone Container via the data node; we will do it
+    // independently in our test path.
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, false);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+
+    Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
+    conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+        pipeline.getLeader().getContainerPort());
+    OzoneContainer container = new OzoneContainer(conf,
+        cluster.getDataNodes().get(0).getFSDataset());
+    container.start();
+
+    XceiverClient client = new XceiverClient(pipeline, conf);
+    client.connect();
+    ContainerProtos.ContainerCommandRequestProto request =
+        ContainerTestHelper.getCreateContainerRequest();
+    ContainerProtos.ContainerCommandResponseProto response =
+        client.sendCommand(request);
+    Assert.assertNotNull(response);
+    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+    container.stop();
+    cluster.shutdown();
+  }
+
+  @Test
+  public void testOzoneContainerViaDataNode() throws Exception {
+
+    Configuration conf = new OzoneConfiguration();
+    URL p = conf.getClass().getResource("");
+    String path = p.getPath().concat(
+        TestOzoneContainer.class.getSimpleName());
+    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
+        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+
+    // Start the ozone container via the datanode.
+    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+
+    Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
+    conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+        pipeline.getLeader().getContainerPort());
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+
+    // This client talks to the ozone container via the datanode.
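+    // Because DFS_OBJECTSTORE_ENABLED_KEY is true here, the DataNode
+    // started by MiniDFSCluster brings up the OzoneContainer (and its
+    // XceiverServer) itself, so unlike the previous test no container
+    // server is constructed explicitly; the client connects straight to
+    // the DataNode-hosted server on the configured
+    // DFS_OZONE_CONTAINER_IPC_PORT.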
+    XceiverClient client = new XceiverClient(pipeline, conf);
+    client.connect();
+    ContainerProtos.ContainerCommandRequestProto request =
+        ContainerTestHelper.getCreateContainerRequest();
+    ContainerProtos.ContainerCommandResponseProto response =
+        client.sendCommand(request);
+    Assert.assertNotNull(response);
+    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+    cluster.shutdown();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
index acf1d9397c7..75998468040 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
@@ -22,7 +22,7 @@ package org.apache.hadoop.ozone.web;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.web.request.OzoneAcl;
 import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.Test;
 
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
index 877a33dc6a4..fd0ed366800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.headers.Header;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.http.HttpResponse;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
index 857ef343a6b..1c2ae755cdd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
@@ -29,13 +29,12 @@ import java.util.Date;
 import java.util.Locale;
 import javax.ws.rs.core.HttpHeaders;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.headers.Header;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 import org.apache.http.HttpResponse;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index 45b8795f4c9..3f50e91b89b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -24,12 +24,11 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.utils.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;