HDDS-332. Remove the ability to configure ozone.handler.type

Contributed by Nandakumar and Anu Engineer.
Anu Engineer 2018-08-28 09:56:02 -07:00
parent 2172399c55
commit df21e1b1dd
37 changed files with 185 additions and 2096 deletions
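Every storage-handler selection in the test suite collapses to the same setup after this change. A minimal sketch of the pattern the diffs below converge on (the datanode count is illustrative); note that no conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, ...) call remains:

    OzoneConfiguration conf = new OzoneConfiguration();
    // The distributed handler is now the only handler, so no
    // ozone.handler.type needs to be (or can be) configured.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    cluster.waitForClusterToBeReady();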


@@ -66,16 +66,9 @@ public final class OzoneConfigKeys {
"dfs.container.ratis.ipc.random.port";
public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
false;
public static final String OZONE_LOCALSTORAGE_ROOT =
"ozone.localstorage.root";
public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
public static final String OZONE_ENABLED =
"ozone.enabled";
public static final boolean OZONE_ENABLED_DEFAULT = false;
public static final String OZONE_HANDLER_TYPE_KEY =
"ozone.handler.type";
public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
public static final String OZONE_TRACE_ENABLED_KEY =
"ozone.trace.enabled";
public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;


@@ -114,7 +114,6 @@ public final class OzoneConsts {
* Ozone handler types.
*/
public static final String OZONE_HANDLER_DISTRIBUTED = "distributed";
public static final String OZONE_HANDLER_LOCAL = "local";
public static final String DELETING_KEY_PREFIX = "#deleting#";
public static final String DELETED_KEY_PREFIX = "#deleted#";


@@ -277,17 +277,6 @@
Please note: By default ozone is disabled on a hadoop cluster.
</description>
</property>
<property>
<name>ozone.handler.type</name>
<value>distributed</value>
<tag>OZONE, REST</tag>
<description>
Tells ozone which storage handler to use. The possible values are:
distributed - The Ozone distributed storage handler, which speaks to
OM/SCM on the backend and provides REST services to clients.
local - Local Storage handler strictly for testing - To be removed.
</description>
</property>
<property>
<name>ozone.key.deleting.limit.per.task</name>
<value>1000</value>
@@ -416,16 +405,6 @@
Default user permissions used in OM.
</description>
</property>
<property>
<name>ozone.localstorage.root</name>
<value>${hadoop.tmp.dir}/ozone</value>
<tag>OZONE, DEBUG</tag>
<description>
This is used only for testing purposes. This value is used by the local
storage handler to simulate a REST backend. This is useful only when
debugging the REST front end independent of OM and SCM. To be removed.
</description>
</property>
<property>
<name>ozone.metadata.dirs</name>
<value/>

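With the two properties above gone from ozone-default.xml, ozone.enabled is the only switch left for turning Ozone on. A sketch of the programmatic equivalent, assuming the standard Hadoop Configuration setters:

    OzoneConfiguration conf = new OzoneConfiguration();
    // ozone.enabled defaults to false; this is the only flag still required.
    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);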

@@ -50,10 +50,9 @@ public interface RatisTestHelper {
private final MiniOzoneCluster cluster;
/**
* Create a {@link MiniOzoneCluster} for testing by setting
* OZONE_ENABLED = true,
* RATIS_ENABLED = true, and
* OZONE_HANDLER_TYPE_KEY = "distributed".
* Create a {@link MiniOzoneCluster} for testing by setting.
* OZONE_ENABLED = true
* RATIS_ENABLED = true
*/
public RatisTestSuite(final Class<?> clazz)
throws IOException, TimeoutException, InterruptedException {
@@ -88,7 +87,6 @@ public interface RatisTestHelper {
static OzoneConfiguration newOzoneConfiguration(
Class<?> clazz, RpcType rpc) {
final OzoneConfiguration conf = new OzoneConfiguration();
ContainerTestHelper.setOzoneLocalStorageRoot(clazz, conf);
initRatisConf(rpc, conf);
return conf;
}


@@ -22,8 +22,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -65,16 +63,13 @@ public class TestOzoneRestClient {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
cluster.waitForClusterToBeReady();
InetSocketAddress omHttpAddress = cluster.getOzoneManager()


@@ -81,8 +81,6 @@ public class TestCloseContainerHandlingByClient {
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
chunkSize = (int)OzoneConsts.MB;
blockSize = 4 * chunkSize;
conf.setInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);


@@ -24,9 +24,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -81,16 +79,13 @@ public class TestOzoneRpcClient {
/**
* Create a MiniOzoneCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 1);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).build();
cluster.waitForClusterToBeReady();
@@ -439,7 +434,7 @@ public class TestOzoneRpcClient {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String value = RandomStringUtils.random(RandomUtils.nextInt(0,1024));
String value = RandomStringUtils.random(RandomUtils.nextInt(0, 1024));
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);


@@ -38,7 +38,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -73,15 +72,6 @@ public final class ContainerTestHelper {
private ContainerTestHelper() {
}
public static void setOzoneLocalStorageRoot(
Class<?> clazz, OzoneConfiguration conf) {
String path = GenericTestUtils.getTempPath(clazz.getSimpleName());
path += conf.getTrimmed(
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
}
// TODO: mock multi-node pipeline
/**
* Create a pipeline with single node replica.

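With setOzoneLocalStorageRoot removed, tests that still need a scratch directory derive it directly from the test class name, as the TestContainerPersistence and TestKeysRatis hunks below do. A sketch of that replacement pattern:

    // Derive a temp path directly instead of routing it through the
    // removed ozone.localstorage.root key.
    String hddsPath = GenericTestUtils
        .getTempPath(TestContainerPersistence.class.getSimpleName());
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);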

@@ -28,7 +28,6 @@ import java.util.Random;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -45,17 +44,14 @@ import org.junit.Test;
*/
public class TestContainerDeletionChoosingPolicy {
private static String path;
private static ContainerSet containerSet;
private static OzoneConfiguration conf;
private ContainerSet containerSet;
private OzoneConfiguration conf;
@Before
public void init() throws Throwable {
conf = new OzoneConfiguration();
path = GenericTestUtils
.getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName());
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
}
@Test


@@ -21,21 +21,20 @@ import com.google.common.collect.Maps;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume
.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -45,8 +44,6 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.utils.MetadataStore;
import org.junit.After;
import org.junit.AfterClass;
@@ -68,113 +65,80 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
import java.util.UUID;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Stage.COMBINED;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
import static org.apache.hadoop.ozone.container.ContainerTestHelper
.setDataChecksum;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Stage.COMBINED;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Simple tests to verify that container persistence works as expected.
* Some of these tests are specific to {@link KeyValueContainer}. If a new
* {@link ContainerProtos.ContainerType} is added, the tests need to be
* modified.
* Simple tests to verify that container persistence works as expected. Some of
* these tests are specific to {@link KeyValueContainer}. If a new {@link
* ContainerProtos.ContainerType} is added, the tests need to be modified.
*/
public class TestContainerPersistence {
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static final String DATANODE_UUID = UUID.randomUUID().toString();
private static final String SCM_ID = UUID.randomUUID().toString();
private static Logger log =
LoggerFactory.getLogger(TestContainerPersistence.class);
private static String hddsPath;
private static String path;
private static OzoneConfiguration conf;
private static List<StorageLocation> pathLists = new LinkedList<>();
private Long containerID = 8888L;;
private static final String datanodeUuid = UUID.randomUUID().toString();
private static final String scmId = UUID.randomUUID().toString();
private static ContainerSet containerSet;
private static VolumeSet volumeSet;
private static VolumeChoosingPolicy volumeChoosingPolicy;
private static KeyManager keyManager;
private static ChunkManager chunkManager;
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private Long containerID = 8888L;
@BeforeClass
public static void init() throws Throwable {
conf = new OzoneConfiguration();
hddsPath = GenericTestUtils
.getTempPath(TestContainerPersistence.class.getSimpleName());
path = hddsPath + conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);
File containerDir = new File(path);
if (containerDir.exists()) {
FileUtils.deleteDirectory(new File(path));
}
Assert.assertTrue(containerDir.mkdirs());
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
}
@AfterClass
public static void shutdown() throws IOException {
FileUtils.deleteDirectory(new File(path));
FileUtils.deleteDirectory(new File(hddsPath));
}
@Before
public void setupPaths() throws IOException {
if (!new File(path).exists() && !new File(path).mkdirs()) {
throw new IOException("Unable to create paths. " + path);
}
StorageLocation loc = StorageLocation.parse(
Paths.get(path).resolve(CONTAINER_ROOT_PREFIX).toString());
pathLists.clear();
containerSet = new ContainerSet();
volumeSet = new VolumeSet(datanodeUuid, conf);
volumeSet = new VolumeSet(DATANODE_UUID, conf);
keyManager = new KeyManagerImpl(conf);
chunkManager = new ChunkManagerImpl();
if (!new File(loc.getNormalizedUri()).mkdirs()) {
throw new IOException("unable to create paths. " +
loc.getNormalizedUri());
}
pathLists.add(loc);
for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
StorageLocation location = StorageLocation.parse(dir);
FileUtils.forceMkdir(new File(location.getNormalizedUri()));
}
}
}
@After
public void cleanupDir() throws IOException {
// Clean up SCM metadata
log.info("Deleting {}", path);
FileUtils.deleteDirectory(new File(path));
log.info("Deleting {}", hddsPath);
FileUtils.deleteDirectory(new File(hddsPath));
@@ -196,9 +160,9 @@ public class TestContainerPersistence {
data.addMetadata("VOLUME", "shire");
data.addMetadata("owner)", "bilbo");
KeyValueContainer container = new KeyValueContainer(data, conf);
container.create(volumeSet, volumeChoosingPolicy, scmId);
container.create(volumeSet, volumeChoosingPolicy, SCM_ID);
containerSet.addContainer(container);
return container;
return container;
}
@Test
@@ -209,7 +173,7 @@
.containsKey(testContainerID));
KeyValueContainerData kvData =
(KeyValueContainerData) containerSet.getContainer(testContainerID)
.getContainerData();
.getContainerData();
Assert.assertNotNull(kvData);
Assert.assertTrue(new File(kvData.getMetadataPath()).exists());
@@ -287,7 +251,7 @@
}
@Test
public void testGetContainerReports() throws Exception{
public void testGetContainerReports() throws Exception {
final int count = 10;
List<Long> containerIDs = new ArrayList<>();
@@ -296,7 +260,7 @@
Container container = addContainer(containerSet, testContainerID);
// Close a bunch of containers.
if (i%3 == 0) {
if (i % 3 == 0) {
container.close();
}
containerIDs.add(testContainerID);
@@ -307,7 +271,8 @@
List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
containerSet.getContainerReport().getReportsList();
Assert.assertEquals(10, reports.size());
for(StorageContainerDatanodeProtocolProtos.ContainerInfo report : reports) {
for (StorageContainerDatanodeProtocolProtos.ContainerInfo report :
reports) {
long actualContainerID = report.getContainerID();
Assert.assertTrue(containerIDs.remove(actualContainerID));
}
@@ -315,8 +280,8 @@
}
/**
* This test creates 50 containers and reads them back 5 containers at a
* time and verifies that we did get back all containers.
* This test creates 50 containers and reads them back 5 containers at a time
* and verifies that we did get back all containers.
*
* @throws IOException
*/
@@ -426,7 +391,7 @@
sha.update(FileUtils.readFileToByteArray(fname.toFile()));
String val = Hex.encodeHexString(sha.digest());
Assert.assertEquals(fileHashMap.get(fname.getFileName().toString())
.getChecksum(), val);
.getChecksum(), val);
count++;
sha.reset();
}
@@ -454,8 +419,8 @@
@Test
public void testPartialRead() throws Exception {
final int datalen = 1024;
final int start = datalen/4;
final int length = datalen/2;
final int start = datalen / 4;
final int length = datalen / 2;
long testContainerID = getTestContainerID();
Container container = addContainer(containerSet, testContainerID);
@@ -544,7 +509,8 @@
}
// Request to read the whole data in a single go.
ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount);
ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0,
datalen * chunkCount);
byte[] newdata = chunkManager.readChunk(container, blockID, largeChunk);
MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
newSha.update(newdata);
@@ -701,8 +667,8 @@
}
/**
* Tries to update an existing and non-existing container.
* Verifies container map and persistent data both updated.
* Tries to update an existing and non-existing container. Verifies container
* map and persistent data both updated.
*
* @throws IOException
*/
@@ -743,7 +709,7 @@
orgContainerFile.getAbsolutePath(),
newContainerFile.getAbsolutePath());
ContainerData actualContainerData = ContainerDataYaml.readContainerFile(
ContainerData actualContainerData = ContainerDataYaml.readContainerFile(
newContainerFile);
Assert.assertEquals("shire_new",
actualContainerData.getMetadata().get("VOLUME"));


@@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -88,16 +87,11 @@ public class TestBlockDeletion {
File baseDir = new File(path);
baseDir.mkdirs();
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
conf.setQuietMode(false);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
TimeUnit.MILLISECONDS);
conf.setQuietMode(false);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1)
.setHbInterval(200)


@@ -1,19 +1,18 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
@@ -26,8 +25,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -54,20 +51,16 @@ public class TestCloseContainerByPipeline {
private static OzoneClient client;
private static ObjectStore objectStore;
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3).build();
cluster.waitForClusterToBeReady();
@@ -243,7 +236,8 @@ public class TestCloseContainerByPipeline {
() -> isContainerClosed(cluster, containerID, datanodeDetails), 500,
15 * 1000);
//double check if it's really closed (waitFor also throws an exception)
Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
Assert.assertTrue(isContainerClosed(cluster,
containerID, datanodeDetails));
}
Assert.assertFalse(logCapturer.getOutput().contains(
"submitting CloseContainer request over STAND_ALONE "
@@ -257,13 +251,14 @@ public class TestCloseContainerByPipeline {
private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID,
DatanodeDetails datanode) {
ContainerData containerData;
for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes())
for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) {
if (datanode.equals(datanodeService.getDatanodeDetails())) {
containerData =
datanodeService.getDatanodeStateMachine().getContainer()
.getContainerSet().getContainer(containerID).getContainerData();
return containerData.isClosed();
}
}
return false;
}
}


@@ -91,8 +91,6 @@ public class TestOzoneContainer {
static OzoneConfiguration newOzoneConfiguration() {
final OzoneConfiguration conf = new OzoneConfiguration();
ContainerTestHelper.setOzoneLocalStorageRoot(
TestOzoneContainer.class, conf);
return conf;
}


@@ -54,8 +54,6 @@ public class TestOzoneContainerRatis {
static OzoneConfiguration newOzoneConfiguration() {
final OzoneConfiguration conf = new OzoneConfiguration();
ContainerTestHelper.setOzoneLocalStorageRoot(
TestOzoneContainerRatis.class, conf);
return conf;
}


@@ -47,8 +47,6 @@ public class TestRatisManager {
static OzoneConfiguration newOzoneConfiguration() {
final OzoneConfiguration conf = new OzoneConfiguration();
ContainerTestHelper.setOzoneLocalStorageRoot(
TestRatisManager.class, conf);
return conf;
}


@@ -20,8 +20,6 @@ package org.apache.hadoop.ozone.freon;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -45,16 +43,13 @@ public class TestDataValidate {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5).build();
cluster.waitForClusterToBeReady();


@@ -41,8 +41,7 @@ public class TestFreon {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/


@@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.*;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -33,7 +31,6 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -60,16 +57,13 @@ public class TestContainerReportWithKeys {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager();
@@ -117,10 +111,6 @@ public class TestContainerReportWithKeys {
cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
.get(0).getBlocksLatestVersionOnly().get(0);
ContainerData cd = getContainerData(keyInfo.getContainerID());
/* LOG.info("DN Container Data: keyCount: {} used: {} ",
cd.getKeyCount(), cd.getBytesUsed());*/
ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());


@@ -62,8 +62,7 @@ public class TestMultipleContainerReadWrite {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@@ -73,8 +72,6 @@ public class TestMultipleContainerReadWrite {
// set to as small as 100 bytes per block.
conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 1);
conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5);
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();


@@ -22,8 +22,6 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -67,16 +65,13 @@ public class TestOmBlockVersioning {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();


@@ -23,9 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -52,8 +50,6 @@ public class TestOmMetrics {
@Before
public void setup() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
ozoneManager = cluster.getOzoneManager();
@@ -104,7 +100,8 @@ public class TestOmMetrics {
Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);
org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, "volumeManager", mockVm);
org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager,
"volumeManager", mockVm);
doVolumeOps();
omMetrics = getMetrics("OMMetrics");


@@ -20,7 +20,6 @@ import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.scm.cli.SQLCLI;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
@@ -97,16 +96,13 @@ public class TestOmSQLCli {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@Before
public void setup() throws Exception {
conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
storageHandler = new ObjectStoreHandler(conf).getStorageHandler();


@@ -108,8 +108,7 @@ public class TestOzoneManager {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@@ -119,8 +118,6 @@ public class TestOzoneManager {
clusterId = UUID.randomUUID().toString();
scmId = UUID.randomUUID().toString();
omId = UUID.randomUUID().toString();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
cluster = MiniOzoneCluster.newBuilder(conf)
.setClusterId(clusterId)


@@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights;
import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -134,11 +133,6 @@ public class TestOzoneShell {
baseDir = new File(path);
baseDir.mkdirs();
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
conf.setQuietMode(false);
shell = new Shell();
shell.setConf(conf);
@@ -146,6 +140,7 @@
.setNumDatanodes(3)
.build();
conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
conf.setQuietMode(false);
client = new RpcClient(conf);
cluster.waitForClusterToBeReady();
}
@@ -208,12 +203,15 @@
testCreateVolume(volumeName, "");
volumeName = "volume" + RandomStringUtils.randomNumeric(5);
testCreateVolume("/////" + volumeName, "");
testCreateVolume("/////", "Volume name is required to create a volume");
testCreateVolume("/////", "Volume name is required " +
"to create a volume");
testCreateVolume("/////vol/123",
"Illegal argument: Bucket or Volume name has an unsupported character : /");
"Illegal argument: Bucket or Volume name has " +
"an unsupported character : /");
}
private void testCreateVolume(String volumeName, String errorMsg) throws Exception {
private void testCreateVolume(String volumeName, String errorMsg)
throws Exception {
err.reset();
String userName = "bilbo";
String[] args = new String[] {"-createVolume", url + "/" + volumeName,
@@ -397,7 +395,7 @@
// test -prefix option
out.reset();
args = new String[] { "-listVolume", url + "/", "-user", user1, "-length",
args = new String[] {"-listVolume", url + "/", "-user", user1, "-length",
"100", "-prefix", "test-vol-" + protocol + "1" };
assertEquals(0, ToolRunner.run(shell, args));
commandOutput = out.toString();
@@ -414,7 +412,7 @@
// test -start option
out.reset();
args = new String[] { "-listVolume", url + "/", "-user", user2, "-length",
args = new String[] {"-listVolume", url + "/", "-user", user2, "-length",
"100", "-start", "test-vol-" + protocol + "15" };
assertEquals(0, ToolRunner.run(shell, args));
commandOutput = out.toString();


@@ -1,187 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.TestOzoneHelper;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.io.IOException;
/**
* Test ozone volume in the local storage handler scenario.
*/
public class TestLocalOzoneVolumes extends TestOzoneHelper {
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static MiniOzoneCluster cluster = null;
private static int port = 0;
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
* emulate Ozone backend.
*
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestLocalOzoneVolumes.class.getSimpleName());
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
port = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails().getPort(
DatanodeDetails.Port.Name.REST).getValue();
}
/**
* Shutdown MiniDFSCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Creates Volumes on Ozone Store.
*
* @throws IOException
*/
@Test
public void testCreateVolumes() throws IOException {
super.testCreateVolumes(port);
}
/**
* Create Volumes with Quota.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithQuota() throws IOException {
super.testCreateVolumesWithQuota(port);
}
/**
* Create Volumes with Invalid Quota.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithInvalidQuota() throws IOException {
super.testCreateVolumesWithInvalidQuota(port);
}
/**
* To create a volume a user name must be specified using OZONE_USER header.
* This test verifies that we get an error in case we call without a OZONE
* user name.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithInvalidUser() throws IOException {
super.testCreateVolumesWithInvalidUser(port);
}
/**
* Only Admins can create volumes in Ozone. This test uses simple userauth as
* backend and hdfs and root are admin users in the simple backend.
* <p>
* This test tries to create a volume as user bilbo.
*
* @throws IOException
*/
@Test
public void testCreateVolumesWithOutAdminRights() throws IOException {
super.testCreateVolumesWithOutAdminRights(port);
}
/**
* Create a bunch of volumes in a loop.
*
* @throws IOException
*/
//@Test
public void testCreateVolumesInLoop() throws IOException {
super.testCreateVolumesInLoop(port);
}
/**
* Get volumes owned by the user.
*
* @throws IOException
*/
@Test
public void testGetVolumesByUser() throws IOException {
super.testGetVolumesByUser(port);
}
/**
* Admins can read volumes belonging to other users.
*
* @throws IOException
*/
@Test
public void testGetVolumesOfAnotherUser() throws IOException {
super.testGetVolumesOfAnotherUser(port);
}
/**
* if you try to read volumes belonging to another user,
* then server always ignores it.
*
* @throws IOException
*/
@Test @Ignore
public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
super.testGetVolumesOfAnotherUserShouldFail(port);
}
@Test
public void testListKeyOnEmptyBucket() throws IOException {
super.testListKeyOnEmptyBucket(port);
}
}


@@ -19,9 +19,7 @@ package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestOzoneHelper;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -39,9 +37,9 @@ import java.io.IOException;
/**
* Test ozone volume in the distributed storage handler scenario.
*/
public class TestDistributedOzoneVolumes extends TestOzoneHelper {
public class TestOzoneVolumes extends TestOzoneHelper {
private static final org.slf4j.Logger LOG =
LoggerFactory.getLogger(TestDistributedOzoneVolumes.class);
LoggerFactory.getLogger(TestOzoneVolumes.class);
/**
* Set the timeout for every test.
*/
@@ -54,8 +52,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@@ -63,8 +60,6 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
port = cluster.getHddsDatanodes().get(0)


@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.web;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.headers.Header;
@@ -62,20 +61,13 @@ public class TestOzoneWebAccess {
/**
* Create a MiniDFSCluster for testing.
*
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
* emulate Ozone backend.
*
* Ozone is made active by setting OZONE_ENABLED = true
* @throws IOException
*/
@BeforeClass
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
String path = GenericTestUtils
.getTempPath(TestOzoneWebAccess.class.getSimpleName());
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
port = cluster.getHddsDatanodes().get(0)


@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.web.client;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.VolumeArgs;
@@ -86,9 +85,7 @@ public class TestBuckets {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
* emulate Ozone backend.
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@@ -100,10 +97,6 @@ public class TestBuckets {
String path = GenericTestUtils
.getTempPath(TestBuckets.class.getSimpleName());
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.build();


@@ -19,9 +19,9 @@ package org.apache.hadoop.ozone.web.client;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.RatisTestHelper;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -58,7 +58,7 @@ public class TestKeysRatis {
@BeforeClass
public static void init() throws Exception {
suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class);
path = suite.getConf().get(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT);
path = GenericTestUtils.getTempPath(TestKeysRatis.class.getSimpleName());
ozoneCluster = suite.getCluster();
ozoneCluster.waitForClusterToBeReady();
client = suite.newOzoneClient();


@@ -45,7 +45,6 @@ import io.netty.handler.logging.LoggingHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.rest.headers.Header;
@@ -92,8 +91,6 @@ public class TestOzoneClient {
public static void init() throws Exception {
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.ALL);
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
int port = cluster.getHddsDatanodes().get(0)


@@ -23,7 +23,6 @@ import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.RestClient;
@@ -79,9 +78,7 @@ public class TestVolume {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
* emulate Ozone backend.
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@@ -91,11 +88,8 @@
String path = GenericTestUtils
.getTempPath(TestVolume.class.getSimpleName());
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
FileUtils.deleteDirectory(new File(path));
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = MiniOzoneCluster.newBuilder(conf).build();
@@ -221,7 +215,8 @@
client.createVolume(volumeName);
client.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
OzoneVolume newVol = client.getVolumeDetails(volumeName);
assertEquals(newVol.getQuota(), OzoneQuota.parseQuota("1000MB").sizeInBytes());
assertEquals(newVol.getQuota(),
OzoneQuota.parseQuota("1000MB").sizeInBytes());
// verify if the creation time is missing after setting quota operation
assertTrue(newVol.getCreationTime() > 0);
}


@@ -72,11 +72,8 @@ public class TestVolumeRatis {
String path = GenericTestUtils
.getTempPath(TestVolume.class.getSimpleName());
path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
FileUtils.deleteDirectory(new File(path));
conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();


@@ -1,28 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
import com.sun.jersey.api.container.ContainerFactory;
import com.sun.jersey.api.core.ApplicationAdapter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
import org.apache.hadoop.ozone.web.ObjectStoreApplication;
import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
@@ -30,35 +46,13 @@ import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import com.sun.jersey.api.container.ContainerFactory;
import com.sun.jersey.api.core.ApplicationAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.ObjectStoreApplication;
import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
import org.apache.hadoop.hdds.scm.protocolPB
.ScmBlockLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler;
import org.apache.hadoop.security.UserGroupInformation;
import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY;
/**
* Implements object store handling within the DataNode process. This class is
@@ -86,69 +80,49 @@ public final class ObjectStoreHandler implements Closeable {
* @throws IOException if there is an I/O error
*/
public ObjectStoreHandler(Configuration conf) throws IOException {
String shType = conf.getTrimmed(OZONE_HANDLER_TYPE_KEY,
OZONE_HANDLER_TYPE_DEFAULT);
LOG.info("ObjectStoreHandler initializing with {}: {}",
OZONE_HANDLER_TYPE_KEY, shType);
boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY,
OZONE_TRACE_ENABLED_DEFAULT);
// Initialize Jersey container for object store web application.
if (OzoneConsts.OZONE_HANDLER_DISTRIBUTED.equalsIgnoreCase(shType)) {
RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
ProtobufRpcEngine.class);
long scmVersion =
RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
ProtobufRpcEngine.class);
long scmVersion =
RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
InetSocketAddress scmAddress =
getScmAddressForClients(conf);
this.storageContainerLocationClient =
new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
scmAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
InetSocketAddress scmAddress =
getScmAddressForClients(conf);
this.storageContainerLocationClient =
new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
scmAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
InetSocketAddress scmBlockAddress =
getScmAddressForBlockClients(conf);
this.scmBlockLocationClient =
new ScmBlockLocationProtocolClientSideTranslatorPB(
RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
InetSocketAddress scmBlockAddress =
getScmAddressForBlockClients(conf);
this.scmBlockLocationClient =
new ScmBlockLocationProtocolClientSideTranslatorPB(
RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
ProtobufRpcEngine.class);
long omVersion =
RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
InetSocketAddress omAddress = getOmAddress(conf);
this.ozoneManagerClient =
new OzoneManagerProtocolClientSideTranslatorPB(
RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
omAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
ProtobufRpcEngine.class);
long omVersion =
RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
InetSocketAddress omAddress = getOmAddress(conf);
this.ozoneManagerClient =
new OzoneManagerProtocolClientSideTranslatorPB(
RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
omAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
storageHandler = new DistributedStorageHandler(
new OzoneConfiguration(conf),
this.storageContainerLocationClient,
this.ozoneManagerClient);
} else {
if (OzoneConsts.OZONE_HANDLER_LOCAL.equalsIgnoreCase(shType)) {
storageHandler = new LocalStorageHandler(conf);
this.storageContainerLocationClient = null;
this.scmBlockLocationClient = null;
this.ozoneManagerClient = null;
} else {
throw new IllegalArgumentException(
String.format("Unrecognized value for %s: %s,"
+ " Allowed values are %s,%s",
OZONE_HANDLER_TYPE_KEY, shType,
OzoneConsts.OZONE_HANDLER_DISTRIBUTED,
OzoneConsts.OZONE_HANDLER_LOCAL));
}
}
ApplicationAdapter aa =
new ApplicationAdapter(new ObjectStoreApplication());
Map<String, Object> settingsMap = new HashMap<>();
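The constructor above repeats one proxy-wiring pattern three times (SCM location client, SCM block client, OM client). A minimal sketch of that pattern, using only calls that appear verbatim in this hunk; the helper name createScmLocationClient is hypothetical, and getScmAddressForClients is the same statically imported helper the constructor uses:
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
static StorageContainerLocationProtocolClientSideTranslatorPB
    createScmLocationClient(Configuration conf) throws IOException {
  // 1. Register protobuf serialization for this protocol.
  RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
      ProtobufRpcEngine.class);
  // 2. The protocol version is derived from the PB interface itself.
  long version =
      RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
  // 3. Resolve the server endpoint from configuration.
  InetSocketAddress address = getScmAddressForClients(conf);
  // 4. Build the proxy and wrap it in the client-side translator.
  return new StorageContainerLocationProtocolClientSideTranslatorPB(
      RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
          address, UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf),
          Client.getRpcTimeout(conf)));
}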

View File

@ -19,10 +19,11 @@
package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* This class is responsible for providing a {@link StorageHandler}
@ -31,6 +32,9 @@ import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler;
@InterfaceAudience.Private
public final class StorageHandlerBuilder {
private static final Logger LOG =
LoggerFactory.getLogger(StorageHandlerBuilder.class);
private static final ThreadLocal<StorageHandler>
STORAGE_HANDLER_THREAD_LOCAL = new ThreadLocal<>();
@ -40,15 +44,15 @@ public final class StorageHandlerBuilder {
*
* @return StorageHandler from thread-local storage
*/
public static StorageHandler getStorageHandler() {
public static StorageHandler getStorageHandler() throws IOException {
StorageHandler storageHandler = STORAGE_HANDLER_THREAD_LOCAL.get();
if (storageHandler != null) {
return storageHandler;
} else {
// This only happens while using mvn jetty:run for testing.
Configuration conf = new OzoneConfiguration();
return new LocalStorageHandler(conf);
LOG.error("No Storage Handler Configured.");
throw new IOException("Invalid Handler Configuration");
}
}
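The thread-local above implies a strict bind/serve/unbind discipline per request thread; with the local-handler fallback gone, getStorageHandler() now fails fast instead of silently creating a handler. A minimal sketch of that discipline as it might live inside StorageHandlerBuilder; the wrapper name dispatchWithHandler is hypothetical, only the thread-local and getStorageHandler() come from this hunk:
// Hypothetical companion to getStorageHandler(): binds a handler to the
// current thread for the duration of one request.
static void dispatchWithHandler(StorageHandler handler, Runnable request) {
  STORAGE_HANDLER_THREAD_LOCAL.set(handler); // visible to getStorageHandler()
  try {
    request.run();
  } finally {
    STORAGE_HANDLER_THREAD_LOCAL.remove();   // never leak across pooled threads
  }
}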
/**

View File

@ -1,385 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.localstorage;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;
import org.apache.hadoop.ozone.web.handlers.ListArgs;
import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.KeyInfo;
import org.apache.hadoop.ozone.web.response.ListBuckets;
import org.apache.hadoop.ozone.web.response.ListKeys;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import java.io.IOException;
import java.io.OutputStream;
/**
* PLEASE NOTE : This file is a dummy backend for test purposes and prototyping
* effort only. It does not handle any Object semantics correctly, neither does
* it take care of security.
*/
@InterfaceAudience.Private
public class LocalStorageHandler implements StorageHandler {
private final Configuration conf;
/**
* Constructs LocalStorageHandler.
*
* @param conf ozone conf.
*/
public LocalStorageHandler(Configuration conf) {
this.conf = conf;
}
/**
* Creates Storage Volume.
*
* @param args - volumeArgs
* @throws IOException
*/
@Override
public void createVolume(VolumeArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.createVolume(args);
}
/**
* setVolumeOwner - sets the owner of the volume.
*
* @param args volumeArgs
* @throws IOException
*/
@Override
public void setVolumeOwner(VolumeArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.OWNER);
}
/**
* Set Volume Quota Info.
*
* @param args - volumeArgs
* @param remove - true if the request is to remove the quota
* @throws IOException
*/
@Override
public void setVolumeQuota(VolumeArgs args, boolean remove)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
if (remove) {
OzoneQuota quota = new OzoneQuota();
args.setQuota(quota);
}
oz.setVolumeProperty(args, OzoneMetadataManager.VolumeProperty.QUOTA);
}
/**
* Checks if a Volume exists and the user specified has access to the volume.
*
* @param volume - Volume Name
* @param acl - Ozone acl which needs to be compared for access
* @return - Boolean - True if the user can modify the volume. This is
* possible for owners of the volume and admin users
* @throws IOException
*/
@Override
public boolean checkVolumeAccess(String volume, OzoneAcl acl)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.checkVolumeAccess(volume, acl);
}
/**
* Returns Info about the specified Volume.
*
* @param args - volumeArgs
* @return VolumeInfo
* @throws IOException
*/
@Override
public VolumeInfo getVolumeInfo(VolumeArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.getVolumeInfo(args);
}
/**
* Deletes an Empty Volume.
*
* @param args - Volume Args
* @throws IOException
*/
@Override
public void deleteVolume(VolumeArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.deleteVolume(args);
}
/**
* Returns the List of Volumes owned by the specific user.
*
* @param args - ListArgs
* @return - List of Volumes
* @throws IOException
*/
@Override
public ListVolumes listVolumes(ListArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.listVolumes(args);
}
/**
* true if the bucket exists and user has read access to the bucket else
* throws Exception.
*
* @param args Bucket args structure
* @throws IOException
*/
@Override
public void checkBucketAccess(BucketArgs args)
throws IOException, OzoneException {
}
/**
* Creates a Bucket in specified Volume.
*
* @param args BucketArgs- BucketName, UserName and Acls
* @throws IOException
*/
@Override
public void createBucket(BucketArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.createBucket(args);
}
/**
* Adds or Removes ACLs from a Bucket.
*
* @param args - BucketArgs
* @throws IOException
*/
@Override
public void setBucketAcls(BucketArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.setBucketProperty(args, OzoneMetadataManager.BucketProperty.ACLS);
}
/**
* Enables or disables Bucket Versioning.
*
* @param args - BucketArgs
* @throws IOException
*/
@Override
public void setBucketVersioning(BucketArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.setBucketProperty(args, OzoneMetadataManager.BucketProperty.VERSIONING);
}
/**
* Sets the Storage Class of a Bucket.
*
* @param args - BucketArgs
* @throws IOException
*/
@Override
public void setBucketStorageClass(BucketArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.setBucketProperty(args, OzoneMetadataManager.BucketProperty.STORAGETYPE);
}
/**
* Deletes a bucket if it is empty.
*
* @param args Bucket args structure
* @throws IOException
*/
@Override
public void deleteBucket(BucketArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.deleteBucket(args);
}
/**
* Returns all Buckets of a specified Volume.
*
* @param args --User Args
* @return ListAllBuckets
* @throws OzoneException
*/
@Override
public ListBuckets listBuckets(ListArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.listBuckets(args);
}
/**
* Returns Bucket's Metadata as a String.
*
* @param args Bucket args structure
* @return Info about the bucket
* @throws IOException
*/
@Override
public BucketInfo getBucketInfo(BucketArgs args)
throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.getBucketInfo(args);
}
/**
* Writes a key in an existing bucket.
*
* @param args KeyArgs
* @return InputStream
* @throws OzoneException
*/
@Override
public OutputStream newKeyWriter(KeyArgs args) throws IOException,
OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.createKey(args);
}
/**
* Tells the file system that the object has been written out completely and
* it can do any house keeping operation that needs to be done.
*
* @param args Key Args
* @param stream
* @throws IOException
*/
@Override
public void commitKey(KeyArgs args, OutputStream stream) throws
IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.commitKey(args, stream);
}
/**
* Reads a key from an existing bucket.
*
* @param args KeyArgs
* @return LengthInputStream
* @throws IOException
*/
@Override
public LengthInputStream newKeyReader(KeyArgs args) throws IOException,
OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.newKeyReader(args);
}
/**
* Deletes an existing key.
*
* @param args KeyArgs
* @throws OzoneException
*/
@Override
public void deleteKey(KeyArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
oz.deleteKey(args);
}
@Override
public void renameKey(KeyArgs args, String toKeyName)
throws IOException, OzoneException {
throw new UnsupportedOperationException("Not yet implemented");
}
/**
* Returns a list of Key.
*
* @param args KeyArgs
* @return BucketList
* @throws IOException
*/
@Override
public ListKeys listKeys(ListArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz =
OzoneMetadataManager.getOzoneMetadataManager(conf);
return oz.listKeys(args);
}
/**
* Get information of the specified Key.
*
* @param args Key Args
*
* @return KeyInfo
*
* @throws IOException
* @throws OzoneException
*/
@Override
public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException {
OzoneMetadataManager oz = OzoneMetadataManager
.getOzoneMetadataManager(conf);
return oz.getKeyInfo(args);
}
@Override
public void close() {
//No resource to close, do nothing.
}
}
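With LocalStorageHandler deleted, the REST front end has exactly one code path. A post-patch bootstrap sketch, assuming only what this commit shows (ObjectStoreHandler is Closeable and its constructor takes a Configuration); the method name startRestFrontEnd is hypothetical:
import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
void startRestFrontEnd() throws IOException {
  OzoneConfiguration conf = new OzoneConfiguration();
  // ozone.enabled is the only switch left; the deleted ozone.handler.type
  // key is no longer read anywhere.
  conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
  try (ObjectStoreHandler objectStore = new ObjectStoreHandler(conf)) {
    // Always backed by DistributedStorageHandler talking to OM and SCM;
    // closing tears down the RPC clients created in the constructor.
  }
}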

View File

@ -57,8 +57,7 @@ public class TestOzoneFSInputStream {
/**
* Create a MiniDFSCluster for testing.
* <p>
* Ozone is made active by setting OZONE_ENABLED = true and
* OZONE_HANDLER_TYPE_KEY = "distributed"
* Ozone is made active by setting OZONE_ENABLED = true
*
* @throws IOException
*/
@ -90,9 +89,6 @@ public class TestOzoneFSInputStream {
// Fetch the host and port for File System init
DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
.getDatanodeDetails();
int port = datanodeDetails
.getPort(DatanodeDetails.Port.Name.REST).getValue();
String host = datanodeDetails.getHostName();
// Set the fs.defaultFS and start the filesystem
String uri = String.format("%s://%s.%s/",