HDFS-13446. Ozone: Fix OzoneFileSystem contract test failures. Contributed by Mukul Kumar Singh.
parent 908ddab55b
commit 025058f251
@@ -52,6 +52,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>
 
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>

@@ -20,6 +20,7 @@ import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine;

@@ -107,6 +108,12 @@ public class InitDatanodeState implements DatanodeState,
    */
   private void persistContainerDatanodeDetails() throws IOException {
     String dataNodeIDPath = HddsUtils.getDatanodeIdFilePath(conf);
+    if (Strings.isNullOrEmpty(dataNodeIDPath)) {
+      LOG.error("A valid file path is needed for config setting {}",
+          ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+      this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
+      return;
+    }
     File idPath = new File(dataNodeIDPath);
     DatanodeDetails datanodeDetails = this.context.getParent()
         .getDatanodeDetails();

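The new early return keeps the datanode from crashing later on a null path; it logs the offending key and moves the state machine to SHUTDOWN instead. A minimal standalone sketch of the same Guava null/empty check, assuming Guava on the classpath and with the config key written out literally as an assumption:

    import com.google.common.base.Strings;

    public class IdPathGuardSketch {
      public static void main(String[] args) {
        // Stand-in for HddsUtils.getDatanodeIdFilePath(conf); may be null or "".
        String dataNodeIDPath = System.getenv("OZONE_DATANODE_ID_PATH");

        if (Strings.isNullOrEmpty(dataNodeIDPath)) {
          // Mirrors the diff: report the offending key and bail out early.
          System.err.println("A valid file path is needed for config setting "
              + "ozone.scm.datanode.id"); // assumed value of OZONE_SCM_DATANODE_ID
          return;
        }
        System.out.println("would persist datanode details to " + dataNodeIDPath);
      }
    }
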
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

@@ -100,6 +100,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hamcrest-all</artifactId>
       <version>1.3</version>
     </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   <build>
     <plugins>

@@ -76,7 +76,7 @@ public class TestStorageContainerManagerHttpServer {
     conf = new Configuration();
     keystoresDir = new File(BASEDIR).getAbsolutePath();
     sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        org.apache.hadoop.hdfs.server.namenode.TestNameNodeHttpServer.class);
+        TestStorageContainerManagerHttpServer.class);
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
     connectionFactory =
         URLConnectionFactory.newDefaultURLConnectionFactory(conf);

@@ -262,7 +262,7 @@ public class TestDeletedBlockLog {
     int count = 0;
     String containerName = null;
     DatanodeDetails dnDd1 = DatanodeDetails.newBuilder()
-        .setUuid("node1")
+        .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
         .setContainerPort(0)

@@ -270,7 +270,7 @@
         .setOzoneRestPort(0)
         .build();
     DatanodeDetails dnId2 = DatanodeDetails.newBuilder()
-        .setUuid("node2")
+        .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
         .setContainerPort(0)

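Note on the two setUuid changes above: DatanodeDetails identifiers get treated as real UUIDs downstream (the next hunk keys a command map by DatanodeDetails.getUuid()), so literal names such as "node1" fail at parse time. A minimal JDK-only sketch of the failure mode:

    import java.util.UUID;

    public class UuidParseSketch {
      public static void main(String[] args) {
        // A generated UUID string round-trips cleanly.
        String valid = UUID.randomUUID().toString();
        System.out.println(UUID.fromString(valid));

        // A bare node name is not a UUID and is rejected.
        try {
          UUID.fromString("node1");
        } catch (IllegalArgumentException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }
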
@@ -315,7 +315,7 @@ public class MockNodeManager implements NodeManager {
 
   // Returns the number of commands that is queued to this node manager.
   public int getCommandCount(DatanodeDetails dd) {
-    List<SCMCommand> list = commandMap.get(dd);
+    List<SCMCommand> list = commandMap.get(dd.getUuid());
     return (list == null) ? 0 : list.size();
   }
 

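The MockNodeManager fix looks commands up by dd.getUuid() rather than by the DatanodeDetails object itself, which only works because enqueue and lookup now agree on the key. A self-contained sketch of the keying pattern (class and command names hypothetical):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.UUID;

    public class CommandMapSketch {
      public static void main(String[] args) {
        // Queue commands under the node's UUID, not the node object.
        Map<UUID, List<String>> commandMap = new HashMap<>();
        UUID nodeId = UUID.randomUUID();
        commandMap.computeIfAbsent(nodeId, k -> new ArrayList<>()).add("deleteBlocks");

        // Looking up with the same UUID key finds the queue.
        List<String> list = commandMap.get(nodeId);
        System.out.println((list == null) ? 0 : list.size()); // prints 1
      }
    }
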
@@ -294,6 +294,9 @@ public class ChunkGroupOutputStream extends OutputStream {
   @Override
   public synchronized void flush() throws IOException {
     checkNotClosed();
+    if (streamEntries.size() == 0) {
+      return;
+    }
     for (int i = 0; i <= currentStreamIndex; i++) {
       streamEntries.get(i).flush();
     }

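The guard added to flush() matters because the loop runs up to currentStreamIndex inclusive; with no stream entries it would index past the end of the list. A stripped-down sketch, assuming the index starts at 0 before any entry is allocated:

    import java.util.ArrayList;
    import java.util.List;

    public class FlushGuardSketch {
      public static void main(String[] args) {
        List<String> streamEntries = new ArrayList<>();
        int currentStreamIndex = 0; // hypothetical initial value

        // Without the size() == 0 guard, the loop below would call
        // streamEntries.get(0) on an empty list -> IndexOutOfBoundsException.
        if (streamEntries.size() == 0) {
          System.out.println("nothing to flush");
          return;
        }
        for (int i = 0; i <= currentStreamIndex; i++) {
          System.out.println("flushing " + streamEntries.get(i));
        }
      }
    }
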
@@ -85,6 +85,31 @@
       <artifactId>hadoop-hdds-common</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-scm</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-framework</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-container-service</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-client</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-common</artifactId>

@@ -93,8 +93,8 @@ public class TestOzoneFSInputStream {
     String host = dataNode.getDatanodeHostname();
 
     // Set the fs.defaultFS and start the filesystem
-    String uri = String.format("%s://%s:%d/%s/%s",
-        Constants.OZONE_URI_SCHEME, host, port, volumeName, bucketName);
+    String uri = String.format("%s://%s.%s/",
+        Constants.OZONE_URI_SCHEME, bucketName, volumeName);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
     fs = FileSystem.get(conf);
     int fileLen = 100 * 1024 * 1024;

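This is the core of the contract-test fix: OzoneFileSystem resolves the bucket and volume from the URI authority (bucket.volume) instead of from a datanode host:port plus path. A small sketch of the two layouts; the scheme value, host, and port here are assumptions for illustration:

    import java.net.URI;

    public class OzoneUriSketch {
      public static void main(String[] args) {
        String scheme = "o3";          // assumed value of Constants.OZONE_URI_SCHEME
        String volume = "volume1";     // hypothetical names
        String bucket = "bucket1";

        // Old, rejected form: datanode host:port plus volume/bucket path.
        String oldUri = String.format("%s://%s:%d/%s/%s",
            scheme, "localhost", 9864, volume, bucket);

        // New form: bucket.volume as the URI authority.
        String newUri = String.format("%s://%s.%s/", scheme, bucket, volume);

        System.out.println(oldUri);                            // o3://localhost:9864/volume1/bucket1
        System.out.println(URI.create(newUri).getAuthority()); // bucket1.volume1
      }
    }
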
@@ -32,14 +32,13 @@ import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
 import org.junit.After;
 
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.OzoneConsts;

@@ -99,7 +98,7 @@ public class TestOzoneFileInterfaces {
   public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .numDataNodes(10)
+        .numDataNodes(3)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
         .build();
     storageHandler =

@@ -119,11 +118,6 @@ public class TestOzoneFileInterfaces {
     BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
     storageHandler.createBucket(bucketArgs);
 
-    // Fetch the host and port for File System init
-    DataNode dataNode = cluster.getDataNodes().get(0);
-    int port = dataNode.getInfoPort();
-    String host = dataNode.getDatanodeHostname();
-
     rootPath = String
         .format("%s://%s.%s/", Constants.OZONE_URI_SCHEME, bucketName,
             volumeName);

@@ -147,7 +141,7 @@ public class TestOzoneFileInterfaces {
   public void testFileSystemInit() throws IOException {
     if (setDefaultFs) {
       assertTrue(
-          "The initialized file system is not OzoneFileSysetem but " +
+          "The initialized file system is not OzoneFileSystem but " +
              fs.getClass(),
          fs instanceof OzoneFileSystem);
       assertEquals(Constants.OZONE_URI_SCHEME, fs.getUri().getScheme());

@@ -20,12 +20,11 @@ package org.apache.hadoop.fs.ozone.contract;
 
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.ozone.Constants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.OzoneConsts;

|
@ -105,8 +104,6 @@ class OzoneContract extends AbstractFSContract {
|
||||||
} catch (OzoneException e) {
|
} catch (OzoneException e) {
|
||||||
throw new IOException(e.getMessage());
|
throw new IOException(e.getMessage());
|
||||||
}
|
}
|
||||||
DataNode dataNode = cluster.getDataNodes().get(0);
|
|
||||||
final int port = dataNode.getInfoPort();
|
|
||||||
|
|
||||||
String uri = String.format("%s://%s.%s/",
|
String uri = String.format("%s://%s.%s/",
|
||||||
Constants.OZONE_URI_SCHEME, bucketName, volumeName);
|
Constants.OZONE_URI_SCHEME, bucketName, volumeName);
|