HDFS-11103. Ozone: Cleanup some dependencies. Contributed by Anu Engineer.

Authored by Anu Engineer on 2016-11-08 10:04:46 -08:00; committed by Owen O'Malley.
parent e49e305f25
commit 8274ff356a
5 changed files with 84 additions and 111 deletions

File: ContainerLocationManagerImpl.java (org.apache.hadoop.ozone.container.common.impl)

@@ -19,26 +19,21 @@
 package org.apache.hadoop.ozone.container.common.impl;

 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager;
+import org.apache.hadoop.ozone.container.common.interfaces
+    .ContainerLocationManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.IOException;
 import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.LinkedList;
 import java.util.List;

 /**
  * A class that tells the ContainerManager where to place the containers.
  * Please note : There is *no* one-to-one correlation between metadata
- * locations and data locations.
+ * metadataLocations and data metadataLocations.
  *
  * For example : A user could map all container files to a
  * SSD but leave data/metadata on bunch of other disks.
@@ -47,46 +42,27 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(ContainerLocationManagerImpl.class);
-  private final Configuration conf;
-  private final FsDatasetSpi<? extends FsVolumeSpi> dataset;
-  private final Path[] volumePaths;
+  private final List<StorageLocation> dataLocations;
   private int currentIndex;
-  private final List<Path> locations;
+  private final List<StorageLocation> metadataLocations;

   /**
    * Constructs a Location Manager.
-   * @param conf - Configuration.
+   * @param metadataLocations - Refers to the metadataLocations
+   *                            where we store the container metadata.
+   * @param dataDirs - metadataLocations where we store the actual
+   *                   data or chunk files.
+   * @throws IOException
    */
-  public ContainerLocationManagerImpl(
-      Configuration conf, List<Path> locations,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset) throws IOException {
-    this.conf = conf;
-    this.dataset = dataset;
-    List<Path> pathList = new LinkedList<>();
-    FsDatasetSpi.FsVolumeReferences references;
-    try {
-      synchronized (this.dataset) {
-        references = this.dataset.getFsVolumeReferences();
-        for (int ndx = 0; ndx < references.size(); ndx++) {
-          FsVolumeSpi vol = references.get(ndx);
-          pathList.add(Paths.get(vol.getBaseURI().getPath()));
-        }
-        references.close();
-        volumePaths = pathList.toArray(new Path[pathList.size()]);
-        this.locations = locations;
-      }
-    } catch (IOException ex) {
-      LOG.error("Unable to get volume paths.", ex);
-      throw new IOException("Internal error", ex);
-    }
-  }
+  public ContainerLocationManagerImpl(List<StorageLocation> metadataLocations,
+      List<StorageLocation> dataDirs)
+      throws IOException {
+    dataLocations = dataDirs;
+    this.metadataLocations = metadataLocations;
+  }

   /**
    * Returns the path where the container should be placed from a set of
-   * locations.
+   * metadataLocations.
    *
    * @return A path where we should place this container and metadata.
    * @throws IOException
@@ -94,9 +70,10 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
   @Override
   public Path getContainerPath()
       throws IOException {
-    Preconditions.checkState(locations.size() > 0);
-    int index = currentIndex % locations.size();
-    return locations.get(index).resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
+    Preconditions.checkState(metadataLocations.size() > 0);
+    int index = currentIndex % metadataLocations.size();
+    Path path = metadataLocations.get(index).getFile().toPath();
+    return path.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
   }

   /**
@@ -107,7 +84,8 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
    */
   @Override
   public Path getDataPath(String containerName) throws IOException {
-    Path currentPath = volumePaths[currentIndex++ % volumePaths.length];
+    Path currentPath = dataLocations.get(currentIndex++ % dataLocations.size())
+        .getFile().toPath();
     currentPath = currentPath.resolve(OzoneConsts.CONTAINER_PREFIX);
     return currentPath.resolve(containerName);
   }
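
After this change the location manager is a plain round-robin selector over two StorageLocation lists, with no FsDataset dependency left. A minimal usage sketch (the directory names and two-disk layout are illustrative assumptions, not part of the patch):

    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    import java.nio.file.Path;
    import java.util.Arrays;
    import java.util.List;

    // Metadata on an SSD, chunk data spread over two other disks.
    List<StorageLocation> meta = Arrays.asList(
        StorageLocation.parse("/ssd1/ozone/meta"));
    List<StorageLocation> data = Arrays.asList(
        StorageLocation.parse("/disk1/ozone/data"),
        StorageLocation.parse("/disk2/ozone/data"));

    ContainerLocationManagerImpl locationManager =
        new ContainerLocationManagerImpl(meta, data);

    // getDataPath advances currentIndex, so consecutive containers
    // alternate between /disk1 and /disk2.
    Path first = locationManager.getDataPath("container-1");
    Path second = locationManager.getDataPath("container-2");

Note that getContainerPath() and getDataPath() share the single currentIndex counter, so metadata placement is also perturbed by data placement; the patch keeps that behavior from the old volumePaths version.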

File: ContainerManagerImpl.java (org.apache.hadoop.ozone.container.common.impl)

@@ -24,17 +24,17 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager;
+import org.apache.hadoop.ozone.container.common.interfaces
+    .ContainerLocationManager;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,11 +49,13 @@ import java.security.DigestInputStream;
 import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
@@ -85,17 +87,16 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public void init(
-      Configuration config, List<Path> containerDirs,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset) throws IOException {
+      Configuration config, List<StorageLocation> containerDirs)
+      throws IOException {
     Preconditions.checkNotNull(config);
     Preconditions.checkNotNull(containerDirs);
     Preconditions.checkState(containerDirs.size() > 0);

     readLock();
     try {
-      for (Path path : containerDirs) {
-        File directory = path.toFile();
+      for (StorageLocation path : containerDirs) {
+        File directory = path.getFile();
         if (!directory.isDirectory()) {
           LOG.error("Invalid path to container metadata directory. path: {}",
               path.toString());
@@ -112,8 +113,14 @@ public class ContainerManagerImpl implements ContainerManager {
           }
         }
       }

-      this.locationManager = new ContainerLocationManagerImpl(config,
-          containerDirs, dataset);
+      List<StorageLocation> dataDirs = new LinkedList<>();
+      for (String dir : config.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
+        StorageLocation location = StorageLocation.parse(dir);
+        dataDirs.add(location);
+      }
+      this.locationManager =
+          new ContainerLocationManagerImpl(containerDirs, dataDirs);
+
     } finally {
       readUnlock();
@@ -286,8 +293,8 @@
       // In case of ozone this is *not* a deal breaker since
       // SCM is guaranteed to generate unique container names.
-      LOG.error("creation of container failed. Name: {} "
-          , containerData.getContainerName());
+      LOG.error("creation of container failed. Name: {} ",
+          containerData.getContainerName());
       throw ex;
     } finally {
       IOUtils.closeStream(dos);
@@ -528,7 +535,7 @@
      * @param containerData - ContainerData.
      * @param active - Active or not active.
      */
-    public ContainerStatus(ContainerData containerData, boolean active) {
+    ContainerStatus(ContainerData containerData, boolean active) {
       this.containerData = containerData;
       this.active = active;
     }
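
The init() rework above also changes where data directories come from: instead of receiving an FsDatasetSpi handle, the manager now derives them itself from the datanode's dfs.datanode.data.dir setting. A standalone sketch of that parsing step (the configured value is an illustrative assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

    import java.util.LinkedList;
    import java.util.List;

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

    Configuration config = new Configuration();
    // Illustrative value; on a real datanode this comes from hdfs-site.xml.
    config.set(DFS_DATANODE_DATA_DIR_KEY, "/disk1/hdfs/data,/disk2/hdfs/data");

    List<StorageLocation> dataDirs = new LinkedList<>();
    // getStrings() splits the comma-separated value; StorageLocation.parse()
    // also accepts entries of the form "[DISK]file:///disk1/hdfs/data".
    for (String dir : config.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
      dataDirs.add(StorageLocation.parse(dir));
    }

One consequence worth noting: init() now fails with a NullPointerException if dfs.datanode.data.dir is unset, since getStrings() returns null in that case.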

File: ContainerManager.java (org.apache.hadoop.ozone.container.common.interfaces)

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -21,14 +21,12 @@ package org.apache.hadoop.ozone.container.common.interfaces;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;

 import java.io.IOException;
-import java.nio.file.Path;
 import java.util.List;

 /**
@@ -45,8 +43,7 @@ public interface ContainerManager extends RwLock {
    * @param containerDirs - List of Metadata Container locations.
    * @throws IOException
    */
-  void init(Configuration config, List<Path> containerDirs,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset)
+  void init(Configuration config, List<StorageLocation> containerDirs)
       throws IOException;

   /**

File: OzoneContainer.java (org.apache.hadoop.ozone.container.ozoneimpl)

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -34,10 +35,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.IOException;
-import java.nio.file.Paths;
 import java.util.LinkedList;
 import java.util.List;
-import java.nio.file.Path;

 /**
  * Ozone main class sets up the network server and initializes the container
@@ -48,7 +47,6 @@ public class OzoneContainer {
       LoggerFactory.getLogger(OzoneContainer.class);

   private final Configuration ozoneConfig;
-  private final FsDatasetSpi<? extends FsVolumeSpi> dataSet;
   private final ContainerDispatcher dispatcher;
   private final ContainerManager manager;
   private final XceiverServer server;
@@ -65,22 +63,21 @@
   public OzoneContainer(
       Configuration ozoneConfig,
       FsDatasetSpi<? extends FsVolumeSpi> dataSet) throws Exception {
-    List<Path> locations = new LinkedList<>();
+    List<StorageLocation> locations = new LinkedList<>();
     String[] paths = ozoneConfig.getStrings(OzoneConfigKeys
         .OZONE_METADATA_DIRS);
     if (paths != null && paths.length > 0) {
       for (String p : paths) {
-        locations.add(Paths.get(p));
+        locations.add(StorageLocation.parse(p));
       }
     } else {
       getDataDir(dataSet, locations);
     }

     this.ozoneConfig = ozoneConfig;
-    this.dataSet = dataSet;

     manager = new ContainerManagerImpl();
-    manager.init(this.ozoneConfig, locations, this.dataSet);
+    manager.init(this.ozoneConfig, locations);
     this.chunkManager = new ChunkManagerImpl(manager);
     manager.setChunkManager(this.chunkManager);
@@ -153,14 +150,14 @@
    */
   private void getDataDir(
       FsDatasetSpi<? extends FsVolumeSpi> dataset,
-      List<Path> pathList) throws IOException {
+      List<StorageLocation> pathList) throws IOException {
     FsDatasetSpi.FsVolumeReferences references;
     try {
       synchronized (dataset) {
         references = dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          pathList.add(Paths.get(vol.getBaseURI().getPath()));
+          pathList.add(StorageLocation.parse(vol.getBaseURI().getPath()));
         }
         references.close();
       }
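
In OzoneContainer the resolution order is now: explicit OZONE_METADATA_DIRS first, and only when that is unset does the constructor fall back to getDataDir(), which walks the FsDataset volumes and parses each volume root into a StorageLocation. A condensed sketch of that decision (the configuration value is an illustrative assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    import java.util.LinkedList;
    import java.util.List;

    Configuration conf = new Configuration();
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, "/ssd1/ozone/meta");

    List<StorageLocation> locations = new LinkedList<>();
    String[] paths = conf.getStrings(OzoneConfigKeys.OZONE_METADATA_DIRS);
    if (paths != null && paths.length > 0) {
      for (String p : paths) {
        locations.add(StorageLocation.parse(p));  // dedicated metadata disks
      }
    } else {
      // Unset: fall back to the datanode's data volumes, as the private
      // getDataDir(dataSet, locations) helper above does.
    }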

File: TestContainerPersistence.java (org.apache.hadoop.ozone.container.common.impl)

@@ -1,19 +1,18 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */

 package org.apache.hadoop.ozone.container.common.impl;
@@ -21,8 +20,7 @@ package org.apache.hadoop.ozone.container.common.impl;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
@@ -31,9 +29,9 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -64,7 +62,6 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .setDataChecksum;
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.fail;

 /**
@@ -85,9 +82,8 @@ public class TestContainerPersistence {
   private static ChunkManagerImpl chunkManager;
   private static KeyManagerImpl keyManager;
   private static OzoneConfiguration conf;
-  private static FsDatasetSpi<? extends FsVolumeSpi> fsDataSet;
   private static MiniOzoneCluster cluster;
-  private static List<Path> pathLists = new LinkedList<>();
+  private static List<StorageLocation> pathLists = new LinkedList<>();

   @BeforeClass
   public static void init() throws Throwable {
@@ -103,12 +99,10 @@
     if (containerDir.exists()) {
       FileUtils.deleteDirectory(new File(path));
     }
-
     Assert.assertTrue(containerDir.mkdirs());

     cluster = new MiniOzoneCluster.Builder(conf)
         .setHandlerType("local").build();
-    fsDataSet = cluster.getDataNodes().get(0).getFSDataset();
     containerManager = new ContainerManagerImpl();
     chunkManager = new ChunkManagerImpl(containerManager);
     containerManager.setChunkManager(chunkManager);
@@ -130,8 +124,8 @@
     }
     pathLists.clear();
     containerManager.getContainerMap().clear();
-    pathLists.add(Paths.get(path));
-    containerManager.init(conf, pathLists, fsDataSet);
+    pathLists.add(StorageLocation.parse(path.toString()));
+    containerManager.init(conf, pathLists);
   }

   @After
@@ -190,8 +184,8 @@
     containerManager.createContainer(createSingleNodePipeline(containerName),
         data);
     try {
-      containerManager.createContainer(createSingleNodePipeline
-          (containerName), data);
+      containerManager.createContainer(createSingleNodePipeline(
+          containerName), data);
       fail("Expected Exception not thrown.");
     } catch (IOException ex) {
       Assert.assertNotNull(ex);
@@ -207,14 +201,14 @@
     ContainerData data = new ContainerData(containerName1);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(containerName1)
-        , data);
+    containerManager.createContainer(createSingleNodePipeline(containerName1),
+        data);

     data = new ContainerData(containerName2);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(containerName2)
-        , data);
+    containerManager.createContainer(createSingleNodePipeline(containerName2),
+        data);

     Assert.assertTrue(containerManager.getContainerMap()
@@ -233,8 +227,8 @@
     data = new ContainerData(containerName1);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(containerName1)
-        , data);
+    containerManager.createContainer(createSingleNodePipeline(containerName1),
+        data);

     // Assert we still have both containers.
     Assert.assertTrue(containerManager.getContainerMap()
@@ -262,8 +256,8 @@
       ContainerData data = new ContainerData(containerName);
       data.addMetadata("VOLUME", "shire");
       data.addMetadata("owner)", "bilbo");
-      containerManager.createContainer(createSingleNodePipeline
-          (containerName), data);
+      containerManager.createContainer(createSingleNodePipeline(containerName),
+          data);
       testMap.put(containerName, data);
     }
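
The test changes show the payoff of the cleanup: setting up a ContainerManagerImpl no longer requires grabbing an FsDataset from a running datanode. A minimal new-style setup sketch (the paths are illustrative assumptions; in the actual test the MiniOzoneCluster supplies dfs.datanode.data.dir):

    import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
    import org.apache.hadoop.ozone.OzoneConfiguration;

    import java.util.LinkedList;
    import java.util.List;

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

    OzoneConfiguration conf = new OzoneConfiguration();
    // init() derives chunk-data dirs from this key, so it must be set.
    conf.set(DFS_DATANODE_DATA_DIR_KEY, "/tmp/container-tests/data");

    List<StorageLocation> pathLists = new LinkedList<>();
    // The metadata directory must already exist; init() validates it.
    pathLists.add(StorageLocation.parse("/tmp/container-tests/meta"));

    ContainerManagerImpl containerManager = new ContainerManagerImpl();
    containerManager.init(conf, pathLists);  // no FsDatasetSpi handle needed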