From 143dd560bf506fafd849aeb47a42becc6c13330d Mon Sep 17 00:00:00 2001
From: Bharat Viswanadham
Date: Mon, 11 Jun 2018 09:04:54 -0700
Subject: [PATCH] HDDS-133: Change format of .container files to Yaml.
 Contributed by Bharat Viswanadham
---
 hadoop-hdds/container-service/pom.xml              |   6 +
 .../common/impl/ChunkLayOutVersion.java            |  18 ++
 .../container/common/impl/ContainerData.java       |  28 +-
 .../common/impl/KeyValueContainerData.java         |  19 +-
 .../container/common/impl/KeyValueYaml.java        | 274 ++++++++++++++++++
 .../common/TestKeyValueContainerData.java          |  15 +-
 .../common/impl/TestKeyValueYaml.java              | 158 ++++++++++
 .../test/resources/additionalfields.container      |   9 +
 .../src/test/resources/incorrect.container         |  10 +
 hadoop-hdds/pom.xml                                |   2 +
 10 files changed, 521 insertions(+), 18 deletions(-)
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
 create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestKeyValueYaml.java
 create mode 100644 hadoop-hdds/container-service/src/test/resources/additionalfields.container
 create mode 100644 hadoop-hdds/container-service/src/test/resources/incorrect.container

diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 542462e8c7d..43f400c727f 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -52,6 +52,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>
 
+    <dependency>
+      <groupId>org.yaml</groupId>
+      <artifactId>snakeyaml</artifactId>
+      <version>1.8</version>
+    </dependency>
+
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
index fff68de65fa..d1b1bd66493 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.container.common.impl;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Defines layout versions for the Chunks.
  */
@@ -42,6 +44,22 @@ public final class ChunkLayOutVersion {
     this.description = description;
   }
 
+  /**
+   * Return ChunkLayOutVersion object for the chunkVersion.
+   * @param chunkVersion
+   * @return ChunkLayOutVersion
+   */
+  public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) {
+    Preconditions.checkArgument((chunkVersion <= ChunkLayOutVersion
+        .getLatestVersion().getVersion()));
+    for(ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSION_INFOS) {
+      if(chunkLayOutVersion.getVersion() == chunkVersion) {
+        return chunkLayOutVersion;
+      }
+    }
+    return null;
+  }
+
   /**
    * Returns all versions.
    *
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 06aae6665a5..0bd7795a263 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -42,7 +42,7 @@ public class ContainerData {
   private final long containerId;
 
   // Layout version of the container data
-  private final ChunkLayOutVersion layOutVersion;
+  private final int layOutVersion;
 
   // Metadata of the container will be a key value pair.
   // This can hold information like volume name, owner etc.,
@@ -67,7 +67,27 @@ public class ContainerData {
   public ContainerData(ContainerType type, long containerId) {
     this.containerType = type;
     this.containerId = containerId;
-    this.layOutVersion = ChunkLayOutVersion.getLatestVersion();
+    this.layOutVersion = ChunkLayOutVersion.getLatestVersion().getVersion();
+    this.metadata = new TreeMap<>();
+    this.state = ContainerLifeCycleState.OPEN;
+    this.readCount = new AtomicLong(0L);
+    this.readBytes = new AtomicLong(0L);
+    this.writeCount = new AtomicLong(0L);
+    this.writeBytes = new AtomicLong(0L);
+    this.bytesUsed = new AtomicLong(0L);
+  }
+
+  /**
+   * Creates a ContainerData Object, which holds metadata of the container.
+   * @param type - ContainerType
+   * @param containerId - ContainerId
+   * @param layOutVersion - Container layOutVersion
+   */
+  public ContainerData(ContainerType type, long containerId, int
+      layOutVersion) {
+    this.containerType = type;
+    this.containerId = containerId;
+    this.layOutVersion = layOutVersion;
     this.metadata = new TreeMap<>();
     this.state = ContainerLifeCycleState.OPEN;
     this.readCount = new AtomicLong(0L);
@@ -113,8 +133,8 @@ public class ContainerData {
    * Returns the layOutVersion of the actual container data format.
    * @return layOutVersion
    */
-  public ChunkLayOutVersion getLayOutVersion() {
-    return layOutVersion;
+  public int getLayOutVersion() {
+    return ChunkLayOutVersion.getChunkLayOutVersion(layOutVersion).getVersion();
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
index 37eaa49fc1c..57b52643313 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
@@ -52,12 +52,23 @@ public class KeyValueContainerData extends ContainerData {
     this.numPendingDeletionBlocks = 0;
   }
 
+  /**
+   * Constructs KeyValueContainerData object.
+   * @param type - containerType
+   * @param id - ContainerId
+   * @param layOutVersion
+   */
+  public KeyValueContainerData(ContainerProtos.ContainerType type, long id,
+      int layOutVersion) {
+    super(type, id, layOutVersion);
+    this.numPendingDeletionBlocks = 0;
+  }
   /**
    * Returns path.
    *
    * @return - path
    */
-  public String getDBPath() {
+  public String getDbPath() {
     return dbPath;
   }
 
@@ -66,7 +77,7 @@ public class KeyValueContainerData extends ContainerData {
    *
    * @param path - String.
    */
-  public void setDBPath(String path) {
+  public void setDbPath(String path) {
     this.dbPath = path;
   }
 
@@ -74,7 +85,7 @@
    * Get container file path.
    * @return - Physical path where container file and checksum is stored.
    */
-  public String getContainerPath() {
+  public String getContainerFilePath() {
     return containerFilePath;
   }
 
@@ -82,7 +93,7 @@
    * Set container Path.
    * @param containerPath - File path.
    */
-  public void setContainerPath(String containerPath) {
+  public void setContainerFilePath(String containerPath) {
     this.containerFilePath = containerPath;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
new file mode 100644
index 00000000000..b7ce0d96cbb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.yaml.snakeyaml.Yaml;
+
+
+import java.beans.IntrospectionException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Writer;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.OutputStreamWriter;
+
+import java.io.File;
+
+
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.Map;
+
+import org.yaml.snakeyaml.constructor.AbstractConstruct;
+import org.yaml.snakeyaml.constructor.Constructor;
+import org.yaml.snakeyaml.introspector.BeanAccess;
+import org.yaml.snakeyaml.introspector.Property;
+import org.yaml.snakeyaml.introspector.PropertyUtils;
+import org.yaml.snakeyaml.nodes.MappingNode;
+import org.yaml.snakeyaml.nodes.Node;
+import org.yaml.snakeyaml.nodes.ScalarNode;
+import org.yaml.snakeyaml.nodes.Tag;
+import org.yaml.snakeyaml.representer.Representer;
+
+/**
+ * Class for creating and reading .container files.
+ */
+
+public final class KeyValueYaml {
+
+  private KeyValueYaml() {
+
+  }
+  /**
+   * Creates a .container file in yaml format.
+   *
+   * @param containerFile
+   * @param containerData
+   * @throws IOException
+   */
+  public static void createContainerFile(File containerFile, ContainerData
+      containerData) throws IOException {
+
+    Preconditions.checkNotNull(containerFile, "yamlFile cannot be null");
+    Preconditions.checkNotNull(containerData, "containerData cannot be null");
+
+    PropertyUtils propertyUtils = new PropertyUtils();
+    propertyUtils.setBeanAccess(BeanAccess.FIELD);
+    propertyUtils.setAllowReadOnlyProperties(true);
+
+    Representer representer = new KeyValueContainerDataRepresenter();
+    representer.setPropertyUtils(propertyUtils);
+    representer.addClassTag(org.apache.hadoop.ozone.container.common.impl
+        .KeyValueContainerData.class, new Tag("KeyValueContainerData"));
+
+    Constructor keyValueDataConstructor = new KeyValueDataConstructor();
+
+    Yaml yaml = new Yaml(keyValueDataConstructor, representer);
+
+    Writer writer = new OutputStreamWriter(new FileOutputStream(containerFile),
+        "UTF-8");
+    yaml.dump(containerData, writer);
+    writer.close();
+  }
+
+  /**
+   * Read the yaml file, and return containerData.
+   *
+   * @param containerFile
+   * @throws IOException
+   */
+  public static KeyValueContainerData readContainerFile(File containerFile)
+      throws IOException {
+    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
+
+    InputStream input = null;
+    KeyValueContainerData keyValueContainerData;
+    try {
+      PropertyUtils propertyUtils = new PropertyUtils();
+      propertyUtils.setBeanAccess(BeanAccess.FIELD);
+      propertyUtils.setAllowReadOnlyProperties(true);
+
+      Representer representer = new KeyValueContainerDataRepresenter();
+      representer.setPropertyUtils(propertyUtils);
+      representer.addClassTag(org.apache.hadoop.ozone.container.common.impl
+          .KeyValueContainerData.class, new Tag("KeyValueContainerData"));
+
+      Constructor keyValueDataConstructor = new KeyValueDataConstructor();
+
+      Yaml yaml = new Yaml(keyValueDataConstructor, representer);
+      yaml.setBeanAccess(BeanAccess.FIELD);
+
+      input = new FileInputStream(containerFile);
+      keyValueContainerData = (KeyValueContainerData)
+          yaml.load(input);
+    } finally {
+      if (input != null) {
+        input.close();
+      }
+    }
+    return keyValueContainerData;
+  }
+
+  /**
+   * Representer class to define which fields need to be stored in yaml file.
+   */
+  private static class KeyValueContainerDataRepresenter extends Representer {
+    @Override
+    protected Set<Property> getProperties(Class<? extends Object> type)
+        throws IntrospectionException {
+      Set<Property> set = super.getProperties(type);
+      Set<Property> filtered = new TreeSet<Property>();
+      if (type.equals(KeyValueContainerData.class)) {
+        // filter properties
+        for (Property prop : set) {
+          String name = prop.getName();
+          // When a new field needs to be added, it needs to be added here.
+          if (name.equals("containerType") || name.equals("containerId") ||
+              name.equals("layOutVersion") || name.equals("state") ||
+              name.equals("metadata") || name.equals("dbPath") ||
+              name.equals("containerFilePath") || name.equals(
+                  "containerDBType")) {
+            filtered.add(prop);
+          }
+        }
+      }
+      return filtered;
+    }
+  }
+
+  /**
+   * Constructor class for KeyValueData, which will be used by Yaml.
+   */
+  private static class KeyValueDataConstructor extends Constructor {
+    KeyValueDataConstructor() {
+      //Adding our own specific constructors for tags.
+      this.yamlConstructors.put(new Tag("KeyValueContainerData"),
+          new ConstructKeyValueContainerData());
+      this.yamlConstructors.put(Tag.INT, new ConstructLong());
+    }
+
+    private class ConstructKeyValueContainerData extends AbstractConstruct {
+      public Object construct(Node node) {
+        MappingNode mnode = (MappingNode) node;
+        Map<Object, Object> nodes = constructMapping(mnode);
+        String type = (String) nodes.get("containerType");
+
+        ContainerProtos.ContainerType containerType = ContainerProtos
+            .ContainerType.KeyValueContainer;
+        if (type.equals("KeyValueContainer")) {
+          containerType = ContainerProtos.ContainerType.KeyValueContainer;
+        }
+
+        //Needed this, as TAG.INT type is by default converted to Long.
+        long layOutVersion = (long) nodes.get("layOutVersion");
+        int lv = (int) layOutVersion;
+
+        //When a new field is added, it needs to be added here.
+        KeyValueContainerData kvData = new KeyValueContainerData(containerType,
+            (long) nodes.get("containerId"), lv);
+        kvData.setContainerDBType((String)nodes.get("containerDBType"));
+        kvData.setDbPath((String) nodes.get("dbPath"));
+        kvData.setContainerFilePath((String) nodes.get("containerFilePath"));
+        Map<String, String> meta = (Map) nodes.get("metadata");
+        meta.forEach((key, val) -> {
+          try {
+            kvData.addMetadata(key, val);
+          } catch (IOException e) {
+            throw new IllegalStateException("Unexpected " +
+                "Key Value Pair " + "(" + key + "," + val + ") in the metadata " +
+                "for containerId " + (long) nodes.get("containerId"));
+          }
+        });
+        String state = (String) nodes.get("state");
+        switch (state) {
+        case "OPEN":
+          kvData.setState(ContainerProtos.ContainerLifeCycleState.OPEN);
+          break;
+        case "CLOSING":
+          kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
+          break;
+        case "CLOSED":
+          kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
+          break;
+        default:
+          throw new IllegalStateException("Unexpected " +
+              "ContainerLifeCycleState " + state + " for the containerId " +
+              (long) nodes.get("containerId"));
+        }
+        return kvData;
+      }
+    }
+
+    // The code below is adapted from SnakeYAML, which returns an Integer
+    // when the number fits in an int and a Long otherwise. It is slightly
+    // modified here to always return a Long.
+    private class ConstructLong extends AbstractConstruct {
+      public Object construct(Node node) {
+        String value = constructScalar((ScalarNode) node).toString()
+            .replaceAll("_", "");
+        int sign = +1;
+        char first = value.charAt(0);
+        if (first == '-') {
+          sign = -1;
+          value = value.substring(1);
+        } else if (first == '+') {
+          value = value.substring(1);
+        }
+        int base = 10;
+        if ("0".equals(value)) {
+          return Long.valueOf(0);
+        } else if (value.startsWith("0b")) {
+          value = value.substring(2);
+          base = 2;
+        } else if (value.startsWith("0x")) {
+          value = value.substring(2);
+          base = 16;
+        } else if (value.startsWith("0")) {
+          value = value.substring(1);
+          base = 8;
+        } else if (value.indexOf(':') != -1) {
+          String[] digits = value.split(":");
+          int bes = 1;
+          int val = 0;
+          for (int i = 0, j = digits.length; i < j; i++) {
+            val += (Long.parseLong(digits[(j - i) - 1]) * bes);
+            bes *= 60;
+          }
+          return createNumber(sign, String.valueOf(val), 10);
+        } else {
+          return createNumber(sign, value, 10);
+        }
+        return createNumber(sign, value, base);
+      }
+    }
+
+    private Number createNumber(int sign, String number, int radix) {
+      Number result;
+      if (sign < 0) {
+        number = "-" + number;
+      }
+      result = Long.valueOf(number, radix);
+      return result;
+    }
+  }
+
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index 1541921ba3d..e057f6f7cd2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -38,8 +38,6 @@ public class TestKeyValueContainerData {
     long containerId = 1L;
     ContainerProtos.ContainerType containerType = ContainerProtos
         .ContainerType.KeyValueContainer;
-    String path = "/tmp";
-    String containerDBType = "RocksDB";
     int layOutVersion = 1;
     ContainerProtos.ContainerLifeCycleState state = ContainerProtos
         .ContainerLifeCycleState.OPEN;
@@ -57,10 +55,9 @@ public class TestKeyValueContainerData {
 
     KeyValueContainerData kvData = KeyValueContainerData.getFromProtoBuf(
         containerData);
-
     assertEquals(containerType, kvData.getContainerType());
     assertEquals(containerId, kvData.getContainerId());
-    assertEquals(layOutVersion, kvData.getLayOutVersion().getVersion());
+    assertEquals(layOutVersion, kvData.getLayOutVersion());
     assertEquals(state, kvData.getState());
     assertEquals(2, kvData.getMetadata().size());
     assertEquals("ozone", kvData.getMetadata().get("VOLUME"));
@@ -75,11 +72,9 @@
         .ContainerType.KeyValueContainer;
     String path = "/tmp";
     String containerDBType = "RocksDB";
-    int layOutVersion = 1;
     ContainerProtos.ContainerLifeCycleState state = ContainerProtos
         .ContainerLifeCycleState.CLOSED;
     AtomicLong val = new AtomicLong(0);
-    AtomicLong updatedVal = new AtomicLong(100);
 
     KeyValueContainerData kvData = new KeyValueContainerData(containerType,
         containerId);
@@ -97,8 +92,8 @@ public class TestKeyValueContainerData {
 
     kvData.setState(state);
     kvData.setContainerDBType(containerDBType);
-    kvData.setContainerPath(path);
-    kvData.setDBPath(path);
+    kvData.setContainerFilePath(path);
+    kvData.setDbPath(path);
     kvData.incrReadBytes(10);
     kvData.incrWriteBytes(10);
     kvData.incrReadCount();
@@ -106,8 +101,8 @@
assertEquals(containerDBType, kvData.getContainerDBType()); - assertEquals(path, kvData.getContainerPath()); - assertEquals(path, kvData.getDBPath()); + assertEquals(path, kvData.getContainerFilePath()); + assertEquals(path, kvData.getDbPath()); assertEquals(10, kvData.getReadBytes()); assertEquals(10, kvData.getWriteBytes()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestKeyValueYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestKeyValueYaml.java new file mode 100644 index 00000000000..06f6f9d799a --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestKeyValueYaml.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.impl; + +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * This class tests create/read .container files. + */ +public class TestKeyValueYaml { + + @Test + public void testCreateContainerFile() throws IOException { + String path = new FileSystemTestHelper().getTestRootDir(); + String containerPath = "1.container"; + + File filePath = new File(new FileSystemTestHelper().getTestRootDir()); + filePath.mkdirs(); + + KeyValueContainerData keyValueContainerData = new KeyValueContainerData( + ContainerProtos.ContainerType.KeyValueContainer, Long.MAX_VALUE); + keyValueContainerData.setContainerDBType("RocksDB"); + keyValueContainerData.setDbPath(path); + keyValueContainerData.setContainerFilePath(path); + + File containerFile = new File(filePath, containerPath); + + // Create .container file with ContainerData + KeyValueYaml.createContainerFile(containerFile, keyValueContainerData); + + //Check .container file exists or not. + assertTrue(containerFile.exists()); + + // Read from .container file, and verify data. 
+    KeyValueContainerData kvData = KeyValueYaml.readContainerFile(
+        containerFile);
+    assertEquals(Long.MAX_VALUE, kvData.getContainerId());
+    assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
+        .getContainerType());
+    assertEquals("RocksDB", kvData.getContainerDBType());
+    assertEquals(path, kvData.getContainerFilePath());
+    assertEquals(path, kvData.getDbPath());
+    assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
+        .getState());
+    assertEquals(1, kvData.getLayOutVersion());
+    assertEquals(0, kvData.getMetadata().size());
+
+    // Update ContainerData.
+    kvData.addMetadata("VOLUME", "hdfs");
+    kvData.addMetadata("OWNER", "ozone");
+    kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
+
+
+    // Update .container file with new ContainerData.
+    containerFile = new File(filePath, containerPath);
+    KeyValueYaml.createContainerFile(containerFile, kvData);
+
+    // Reading newly updated data from .container file
+    kvData = KeyValueYaml.readContainerFile(containerFile);
+
+    // verify data.
+    assertEquals(Long.MAX_VALUE, kvData.getContainerId());
+    assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
+        .getContainerType());
+    assertEquals("RocksDB", kvData.getContainerDBType());
+    assertEquals(path, kvData.getContainerFilePath());
+    assertEquals(path, kvData.getDbPath());
+    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
+        .getState());
+    assertEquals(1, kvData.getLayOutVersion());
+    assertEquals(2, kvData.getMetadata().size());
+    assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
+    assertEquals("ozone", kvData.getMetadata().get("OWNER"));
+
+    FileUtil.fullyDelete(filePath);
+
+
+  }
+
+  @Test
+  public void testIncorrectContainerFile() throws IOException {
+    try {
+      String path = "incorrect.container";
+      //Get file from resources folder
+      ClassLoader classLoader = getClass().getClassLoader();
+      File file = new File(classLoader.getResource(path).getFile());
+      KeyValueContainerData kvData = KeyValueYaml.readContainerFile(file);
+      fail("testIncorrectContainerFile failed");
+    } catch (IllegalStateException ex) {
+      GenericTestUtils.assertExceptionContains("Unexpected " +
+          "ContainerLifeCycleState", ex);
+    }
+  }
+
+
+  @Test
+  public void testCheckBackWardCompatabilityOfContainerFile() throws
+      IOException {
+    // This test covers the rollback case: a newer version may write
+    // additional fields to the .container file; if we later roll back,
+    // an older version of Ozone should still be able to read .container
+    // files created by the newer version.
+
+    try {
+      String path = "additionalfields.container";
+      //Get file from resources folder
+      ClassLoader classLoader = getClass().getClassLoader();
+      File file = new File(classLoader.getResource(path).getFile());
+      KeyValueContainerData kvData = KeyValueYaml.readContainerFile(file);
+
+      //Check that the container file data is consistent.
+      assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
+          .getState());
+      assertEquals("RocksDB", kvData.getContainerDBType());
+      assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
+          .getContainerType());
+      assertEquals(9223372036854775807L, kvData.getContainerId());
+      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerdir0/1", kvData
+          .getDbPath());
+      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerdir0/1", kvData
+          .getContainerFilePath());
+      assertEquals(1, kvData.getLayOutVersion());
+      assertEquals(2, kvData.getMetadata().size());
+
+    } catch (Exception ex) {
+      fail("testCheckBackWardCompatabilityOfContainerFile failed");
+    }
+  }
+
+
+}
diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
new file mode 100644
index 00000000000..d0df0fec391
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/resources/additionalfields.container
@@ -0,0 +1,9 @@
+!<KeyValueContainerData>
+containerDBType: RocksDB
+containerFilePath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+containerId: 9223372036854775807
+containerType: KeyValueContainer
+dbPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+layOutVersion: 1
+metadata: {OWNER: ozone, VOLUME: hdfs}
+state: CLOSED
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container
new file mode 100644
index 00000000000..d56702377c2
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/resources/incorrect.container
@@ -0,0 +1,10 @@
+!<KeyValueContainerData>
+containerDBType: RocksDB
+containerFilePath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+containerId: 9223372036854775807
+containerType: KeyValueContainer
+dbPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+layOutVersion: 1
+metadata: {OWNER: ozone, VOLUME: hdfs}
+state: INVALID
+aclEnabled: true
\ No newline at end of file
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index fab45e23586..573803b1b47 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -102,6 +102,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css.map</exclude>
             <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js</exclude>
             <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js.map</exclude>
+            <exclude>src/test/resources/incorrect.container</exclude>
+            <exclude>src/test/resources/additionalfields.container</exclude>
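
Editor's note: the sketch below is not part of the patch. It is a minimal usage example of the new yaml helpers introduced above, writing a KeyValueContainerData instance to a .container file with KeyValueYaml.createContainerFile and reading it back with KeyValueYaml.readContainerFile. The class names, method signatures, and field setters are taken from the code in this patch; the example class name, temporary paths, container id, and metadata values are illustrative only.

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.impl.KeyValueContainerData;
import org.apache.hadoop.ozone.container.common.impl.KeyValueYaml;

import java.io.File;
import java.io.IOException;

public class ContainerFileExample {

  public static void main(String[] args) throws IOException {
    // Build the in-memory container metadata (values are illustrative).
    KeyValueContainerData data = new KeyValueContainerData(
        ContainerProtos.ContainerType.KeyValueContainer, 1L);
    data.setContainerDBType("RocksDB");
    data.setDbPath("/tmp/containerdir0/1");
    data.setContainerFilePath("/tmp/containerdir0/1");
    data.addMetadata("VOLUME", "hdfs");
    data.addMetadata("OWNER", "ozone");

    // Serialize the metadata to a YAML .container file.
    File containerFile = new File("/tmp", "1.container");
    KeyValueYaml.createContainerFile(containerFile, data);

    // Read the .container file back and inspect a few persisted fields.
    KeyValueContainerData read = KeyValueYaml.readContainerFile(containerFile);
    System.out.println(read.getContainerId() + " " + read.getState()
        + " " + read.getMetadata());
  }
}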