diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 25b68e00918..f912f029f2c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -98,7 +98,6 @@ public final class OzoneConsts { public static final String OM_DB_NAME = "om.db"; public static final String STORAGE_DIR_CHUNKS = "chunks"; - public static final String CONTAINER_FILE_CHECKSUM_EXTENSION = ".chksm"; /** * Supports Bucket Versioning. @@ -190,4 +189,5 @@ private OzoneConsts() { public static final String METADATA_PATH = "metadataPath"; public static final String CHUNKS_PATH = "chunksPath"; public static final String CONTAINER_DB_TYPE = "containerDBType"; + public static final String CHECKSUM = "checksum"; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index 1d5dfc542a0..77a891a5a3b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -19,9 +19,11 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos @@ -31,8 +33,8 @@ import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,8 +45,17 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import org.yaml.snakeyaml.Yaml; import static org.apache.commons.io.FilenameUtils.removeExtension; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos + .Result.CONTAINER_CHECKSUM_ERROR; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos + .Result.NO_SUCH_ALGORITHM; +import static org.apache.hadoop.ozone.container.common.impl.ContainerData + .CHARSET_ENCODING; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION; + /** * A set of helper functions to create proper responses. @@ -245,4 +256,48 @@ public synchronized static DatanodeDetails readDatanodeDetailsFrom(File path) + path.getAbsolutePath(), e); } } + + /** + * Verify that the checksum stored in containerData is equal to the + * computed checksum. 
+ * @param containerData + * @throws IOException + */ + public static void verifyChecksum(ContainerData containerData) + throws IOException { + String storedChecksum = containerData.getChecksum(); + + Yaml yaml = ContainerDataYaml.getYamlForContainerType( + containerData.getContainerType()); + containerData.computeAndSetChecksum(yaml); + String computedChecksum = containerData.getChecksum(); + + if (storedChecksum == null || !storedChecksum.equals(computedChecksum)) { + throw new StorageContainerException("Container checksum error for " + + "ContainerID: " + containerData.getContainerID() + ". " + + "\nStored Checksum: " + storedChecksum + + "\nExpected Checksum: " + computedChecksum, + CONTAINER_CHECKSUM_ERROR); + } + } + + /** + * Return the SHA-256 checksum of the containerData. + * @param containerDataYamlStr ContainerData as a Yaml String + * @return Checksum of the container data + * @throws StorageContainerException + */ + public static String getChecksum(String containerDataYamlStr) + throws StorageContainerException { + MessageDigest sha; + try { + sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); + sha.update(containerDataYamlStr.getBytes(CHARSET_ENCODING)); + return DigestUtils.sha256Hex(sha.digest()); + } catch (NoSuchAlgorithmException e) { + throw new StorageContainerException("Unable to create Message Digest, " + + "usually this is a java configuration issue.", NO_SUCH_ALGORITHM); + } + } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index a7e2b55fecb..5803628a690 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -18,22 +18,33 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.nio.charset.Charset; import java.util.List; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. ContainerType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. 
ContainerLifeCycleState; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import java.io.IOException; import java.util.Collections; import java.util.Map; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import org.yaml.snakeyaml.Yaml; import static java.lang.Math.max; +import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE; +import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION; +import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB; +import static org.apache.hadoop.ozone.OzoneConsts.METADATA; +import static org.apache.hadoop.ozone.OzoneConsts.STATE; /** * ContainerData is the in-memory representation of container metadata and is @@ -72,6 +83,23 @@ public abstract class ContainerData { private long deleteTransactionId; + private String checksum; + public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8"); + private static final String DUMMY_CHECKSUM = new String(new byte[64], + CHARSET_ENCODING); + + // Common Fields need to be stored in .container file. + protected static final List YAML_FIELDS = + Collections.unmodifiableList(Lists.newArrayList( + CONTAINER_TYPE, + CONTAINER_ID, + LAYOUTVERSION, + STATE, + METADATA, + MAX_SIZE_GB, + CHECKSUM)); + + /** * Number of pending deletion blocks in container. */ @@ -113,6 +141,7 @@ protected ContainerData(ContainerType type, long containerId, this.maxSizeGB = size; this.numPendingDeletionBlocks = new AtomicInteger(0); this.deleteTransactionId = 0; + setChecksumTo0ByteArray(); } /** @@ -400,6 +429,41 @@ public int getNumPendingDeletionBlocks() { return this.numPendingDeletionBlocks.get(); } + public void setChecksumTo0ByteArray() { + this.checksum = DUMMY_CHECKSUM; + } + + public void setChecksum(String checkSum) { + this.checksum = checkSum; + } + + public String getChecksum() { + return this.checksum; + } + + /** + * Compute the checksum for ContainerData using the specified Yaml (based + * on ContainerType) and set the checksum. + * + * Checksum of ContainerData is calculated by setting the + * {@link ContainerData#checksum} field to a 64-byte array with all 0's - + * {@link ContainerData#DUMMY_CHECKSUM}. After the checksum is calculated, + * the checksum field is updated with this value. + * + * @param yaml Yaml for ContainerType to get the ContainerData as Yaml String + * @throws IOException + */ + public void computeAndSetChecksum(Yaml yaml) throws IOException { + // Set checksum to dummy value - 0 byte array, to calculate the checksum + // of rest of the data. + setChecksumTo0ByteArray(); + + // Dump yaml data into a string to compute its checksum + String containerDataYamlStr = yaml.dump(this); + + this.checksum = ContainerUtils.getChecksum(containerDataYamlStr); + } + /** * Returns a ProtoBuf Message from ContainerData. 
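The placeholder step in computeAndSetChecksum above is the heart of the change: the digest has to be computed over a byte-stable YAML dump, so the checksum field is first reset to the 64-byte zero placeholder, the bean is serialized, and only then is the field overwritten with the digest. A minimal standalone sketch of the same idea — `Data` is a hypothetical stand-in for ContainerData, and a single sha256Hex pass is used for brevity (the patch's getChecksum additionally runs the bytes through a MessageDigest before hex-encoding the result):

```java
import java.nio.charset.StandardCharsets;
import org.apache.commons.codec.digest.DigestUtils;
import org.yaml.snakeyaml.Yaml;

public class ChecksumSketch {
  /** Hypothetical stand-in for ContainerData; public fields so the
   *  default SnakeYAML bean access picks them up. */
  public static class Data {
    public long containerID = 1;
    public String state = "OPEN";
    public String checksum = "";
  }

  public static void main(String[] args) {
    Data data = new Data();
    // 1. Reset the field to the zero placeholder (DUMMY_CHECKSUM above),
    //    so the dump is identical regardless of any previously stored value.
    data.checksum = new String(new byte[64], StandardCharsets.UTF_8);
    // 2. Serialize the bean, 3. hash the serialized form, 4. store the digest.
    String yamlStr = new Yaml().dump(data);
    data.checksum = DigestUtils.sha256Hex(yamlStr);
    System.out.println(data.checksum);
  }
}
```

Because verification repeats exactly these steps, any stored value round-trips: zeroing the field before hashing is what makes the comparison in verifyChecksum well defined.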
* diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index 90af24f3bf5..aed75d3ee2e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -20,10 +20,14 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos + .ContainerType; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; import java.beans.IntrospectionException; @@ -59,9 +63,13 @@ public final class ContainerDataYaml { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerDataYaml.class); + private ContainerDataYaml() { } + /** * Creates a .container file in yaml format. * @@ -69,38 +77,29 @@ private ContainerDataYaml() { * @param containerData * @throws IOException */ - public static void createContainerFile(ContainerProtos.ContainerType - containerType, File containerFile, - ContainerData containerData) throws - IOException { + public static void createContainerFile(ContainerType containerType, + ContainerData containerData, File containerFile) throws IOException { + Writer writer = null; + try { + // Create Yaml for given container type + Yaml yaml = getYamlForContainerType(containerType); + // Compute Checksum and update ContainerData + containerData.computeAndSetChecksum(yaml); - Preconditions.checkNotNull(containerFile, "yamlFile cannot be null"); - Preconditions.checkNotNull(containerData, "containerData cannot be null"); - Preconditions.checkNotNull(containerType, "containerType cannot be null"); - - PropertyUtils propertyUtils = new PropertyUtils(); - propertyUtils.setBeanAccess(BeanAccess.FIELD); - propertyUtils.setAllowReadOnlyProperties(true); - - switch(containerType) { - case KeyValueContainer: - Representer representer = new ContainerDataRepresenter(); - representer.setPropertyUtils(propertyUtils); - representer.addClassTag(KeyValueContainerData.class, - KeyValueContainerData.KEYVALUE_YAML_TAG); - - Constructor keyValueDataConstructor = new ContainerDataConstructor(); - - Yaml yaml = new Yaml(keyValueDataConstructor, representer); - Writer writer = new OutputStreamWriter(new FileOutputStream( + // Write the ContainerData with checksum to Yaml file. + writer = new OutputStreamWriter(new FileOutputStream( containerFile), "UTF-8"); yaml.dump(containerData, writer); - writer.close(); - break; - default: - throw new StorageContainerException("Unrecognized container Type " + - "format " + containerType, ContainerProtos.Result - .UNKNOWN_CONTAINER_TYPE); + + } finally { + try { + if (writer != null) { + writer.close(); + } + } catch (IOException ex) { + LOG.warn("Error occurred during closing the writer. 
ContainerID: " + + containerData.getContainerID()); + } } } @@ -140,6 +139,39 @@ public static ContainerData readContainerFile(File containerFile) return containerData; } + /** + * Given a ContainerType this method returns a Yaml representation of + * the container properties. + * + * @param containerType type of container + * @return Yamal representation of container properties + * + * @throws StorageContainerException if the type is unrecognized + */ + public static Yaml getYamlForContainerType(ContainerType containerType) + throws StorageContainerException { + PropertyUtils propertyUtils = new PropertyUtils(); + propertyUtils.setBeanAccess(BeanAccess.FIELD); + propertyUtils.setAllowReadOnlyProperties(true); + + switch (containerType) { + case KeyValueContainer: + Representer representer = new ContainerDataRepresenter(); + representer.setPropertyUtils(propertyUtils); + representer.addClassTag( + KeyValueContainerData.class, + KeyValueContainerData.KEYVALUE_YAML_TAG); + + Constructor keyValueDataConstructor = new ContainerDataConstructor(); + + return new Yaml(keyValueDataConstructor, representer); + default: + throw new StorageContainerException("Unrecognized container Type " + + "format " + containerType, ContainerProtos.Result + .UNKNOWN_CONTAINER_TYPE); + } + } + /** * Representer class to define which fields need to be stored in yaml file. */ @@ -192,8 +224,9 @@ public Object construct(Node node) { int maxSize = (int) size; //When a new field is added, it needs to be added here. - KeyValueContainerData kvData = new KeyValueContainerData((long) nodes - .get(OzoneConsts.CONTAINER_ID), lv, maxSize); + KeyValueContainerData kvData = new KeyValueContainerData( + (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, maxSize); + kvData.setContainerDBType((String)nodes.get( OzoneConsts.CONTAINER_DB_TYPE)); kvData.setMetadataPath((String) nodes.get( @@ -201,6 +234,7 @@ public Object construct(Node node) { kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); Map meta = (Map) nodes.get(OzoneConsts.METADATA); kvData.setMetadata(meta); + kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM)); String state = (String) nodes.get(OzoneConsts.STATE); switch (state) { case "OPEN": @@ -215,7 +249,7 @@ public Object construct(Node node) { default: throw new IllegalStateException("Unexpected " + "ContainerLifeCycleState " + state + " for the containerId " + - (long) nodes.get(OzoneConsts.CONTAINER_ID)); + nodes.get(OzoneConsts.CONTAINER_ID)); } return kvData; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index f381e24c742..14f731aa388 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -24,12 +24,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerLifeCycleState; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos + .ContainerType; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; -import org.apache.hadoop.io.IOUtils; 
import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -49,10 +49,7 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -113,26 +110,24 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy .getVolumesList(), maxSize); String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - long containerId = containerData.getContainerID(); - String containerName = Long.toString(containerId); + long containerID = containerData.getContainerID(); containerMetaDataPath = KeyValueContainerLocationUtil - .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId); + .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID); File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, scmId, containerId); + hddsVolumeDir, scmId, containerID); + File containerFile = KeyValueContainerLocationUtil.getContainerFile( - containerMetaDataPath, containerName); - File containerCheckSumFile = KeyValueContainerLocationUtil - .getContainerCheckSumFile(containerMetaDataPath, containerName); + containerMetaDataPath, containerID); File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( - containerMetaDataPath, containerName); + containerMetaDataPath, containerID); // Check if it is new Container. ContainerUtils.verifyIsNewContainer(containerMetaDataPath); //Create Metadata path chunks path and metadata db KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath, - chunksPath, dbFile, containerName, config); + chunksPath, dbFile, config); String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); @@ -144,9 +139,8 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy containerData.setDbFile(dbFile); containerData.setVolume(containerVolume); - // Create .container file and .chksm file - writeToContainerFile(containerFile, containerCheckSumFile, true); - + // Create .container file + writeToContainerFile(containerFile, true); } catch (StorageContainerException ex) { if (containerMetaDataPath != null && containerMetaDataPath.getParentFile() @@ -176,97 +170,64 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy - * Creates .container file and checksum file. + * Creates .container file. * * @param containerFile - * @param checksumFile * @param isCreate true if we are creating a new container file and false if * we are updating an existing container file. 
* @throws StorageContainerException */ - private void writeToContainerFile(File containerFile, File - checksumFile, boolean isCreate) + private void writeToContainerFile(File containerFile, boolean isCreate) throws StorageContainerException { File tempContainerFile = null; - File tempChecksumFile = null; - FileOutputStream containerCheckSumStream = null; - Writer writer = null; long containerId = containerData.getContainerID(); try { tempContainerFile = createTempFile(containerFile); - tempChecksumFile = createTempFile(checksumFile); - ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, tempContainerFile, containerData); - - //Compute Checksum for container file - String checksum = KeyValueContainerUtil.computeCheckSum(containerId, - tempContainerFile); - containerCheckSumStream = new FileOutputStream(tempChecksumFile); - writer = new OutputStreamWriter(containerCheckSumStream, "UTF-8"); - writer.write(checksum); - writer.flush(); + ContainerDataYaml.createContainerFile( + ContainerType.KeyValueContainer, containerData, tempContainerFile); if (isCreate) { // When creating a new container, .container file should not exist // already. NativeIO.renameTo(tempContainerFile, containerFile); - NativeIO.renameTo(tempChecksumFile, checksumFile); } else { // When updating a container, the .container file should exist. If // not, the container is in an inconsistent state. Files.move(tempContainerFile.toPath(), containerFile.toPath(), StandardCopyOption.REPLACE_EXISTING); - Files.move(tempChecksumFile.toPath(), checksumFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); } } catch (IOException ex) { throw new StorageContainerException("Error during creation of " + - "required files(.container, .chksm) for container. ContainerID: " - + containerId, ex, CONTAINER_FILES_CREATE_ERROR); + ".container file. ContainerID: " + containerId, ex, + CONTAINER_FILES_CREATE_ERROR); } finally { - IOUtils.closeStream(containerCheckSumStream); if (tempContainerFile != null && tempContainerFile.exists()) { if (!tempContainerFile.delete()) { LOG.warn("Unable to delete container temporary file: {}.", tempContainerFile.getAbsolutePath()); } } - if (tempChecksumFile != null && tempChecksumFile.exists()) { - if (!tempChecksumFile.delete()) { - LOG.warn("Unable to delete container temporary checksum file: {}.", - tempContainerFile.getAbsolutePath()); - } - } - try { - if (writer != null) { - writer.close(); - } - } catch (IOException ex) { - LOG.warn("Error occurred during closing the writer. Container " + - "Name:" + containerId); - } - } } - private void updateContainerFile(File containerFile, File - checksumFile) throws StorageContainerException { + private void updateContainerFile(File containerFile) + throws StorageContainerException { long containerId = containerData.getContainerID(); - if (containerFile.exists() && checksumFile.exists()) { - try { - writeToContainerFile(containerFile, checksumFile, false); - } catch (IOException e) { - //TODO : Container update failure is not handled currently. Might - // lead to loss of .container file. When Update container feature - // support is added, this failure should also be handled. - throw new StorageContainerException("Container update failed. " + - "ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR); - } - } else { + if (!containerFile.exists()) { throw new StorageContainerException("Container is an Inconsistent " + - "state, missing required files(.container, .chksm). 
ContainerID: " + - containerId, INVALID_CONTAINER_STATE); + "state, missing .container file. ContainerID: " + containerId, + INVALID_CONTAINER_STATE); + } + + try { + writeToContainerFile(containerFile, false); + } catch (IOException e) { + //TODO : Container update failure is not handled currently. Might + // lead to loss of .container file. When Update container feature + // support is added, this failure should also be handled. + throw new StorageContainerException("Container update failed. " + + "ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR); } } @@ -305,10 +266,9 @@ public void close() throws StorageContainerException { } containerData.closeContainer(); File containerFile = getContainerFile(); - File containerCheckSumFile = getContainerCheckSumFile(); // update the new container data to .container File - updateContainerFile(containerFile, containerCheckSumFile); + updateContainerFile(containerFile); } catch (StorageContainerException ex) { throw ex; @@ -340,8 +300,8 @@ public ContainerLifeCycleState getContainerState() { } @Override - public ContainerProtos.ContainerType getContainerType() { - return ContainerProtos.ContainerType.KeyValueContainer; + public ContainerType getContainerType() { + return ContainerType.KeyValueContainer; } @Override @@ -369,10 +329,10 @@ public void update(Map metadata, boolean forceUpdate) for (Map.Entry entry : metadata.entrySet()) { containerData.addMetadata(entry.getKey(), entry.getValue()); } + File containerFile = getContainerFile(); - File containerCheckSumFile = getContainerCheckSumFile(); // update the new container data to .container File - updateContainerFile(containerFile, containerCheckSumFile); + updateContainerFile(containerFile); } catch (StorageContainerException ex) { // TODO: // On error, reset the metadata. @@ -460,15 +420,6 @@ private File getContainerFile() { .getContainerID() + OzoneConsts.CONTAINER_EXTENSION); } - /** - * Returns container checksum file. - * @return container checksum file - */ - private File getContainerCheckSumFile() { - return new File(containerData.getMetadataPath(), containerData - .getContainerID() + OzoneConsts.CONTAINER_FILE_CHECKSUM_EXTENSION); - } - /** * Creates a temporary file. 
* @param file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 3e3cc7783cb..34035c80887 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; +import java.util.Collections; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -34,13 +35,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE; -import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION; -import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB; -import static org.apache.hadoop.ozone.OzoneConsts.METADATA; import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; -import static org.apache.hadoop.ozone.OzoneConsts.STATE; /** * This class represents the KeyValueContainer metadata, which is the @@ -53,17 +48,7 @@ public class KeyValueContainerData extends ContainerData { public static final Tag KEYVALUE_YAML_TAG = new Tag("KeyValueContainerData"); // Fields need to be stored in .container file. - private static final List YAML_FIELDS = - Lists.newArrayList( - CONTAINER_TYPE, - CONTAINER_ID, - LAYOUTVERSION, - STATE, - METADATA, - METADATA_PATH, - CHUNKS_PATH, - CONTAINER_DB_TYPE, - MAX_SIZE_GB); + private static final List KV_YAML_FIELDS; // Path to Container metadata Level DB/RocksDB Store and .container file. private String metadataPath; @@ -76,6 +61,15 @@ public class KeyValueContainerData extends ContainerData { private File dbFile = null; + static { + // Initialize YAML fields + KV_YAML_FIELDS = Lists.newArrayList(); + KV_YAML_FIELDS.addAll(YAML_FIELDS); + KV_YAML_FIELDS.add(METADATA_PATH); + KV_YAML_FIELDS.add(CHUNKS_PATH); + KV_YAML_FIELDS.add(CONTAINER_DB_TYPE); + } + /** * Constructs KeyValueContainerData object. * @param id - ContainerId @@ -210,7 +204,7 @@ public ContainerProtos.ContainerData getProtoBufMessage() { } public static List getYamlFields() { - return YAML_FIELDS; + return Collections.unmodifiableList(KV_YAML_FIELDS); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index 868b9f4296b..02a8e73cbbe 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -101,42 +101,26 @@ private static String getContainerSubDirectory(long containerId){ /** * Returns containerFile. 
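The static initializer above replaces a hand-maintained duplicate list: the base class now owns the common fields (including the new checksum), and each container type appends its own. The shape of the pattern, reduced to essentials with abbreviated field names:

```java
import com.google.common.collect.Lists;
import java.util.Collections;
import java.util.List;

class Base {
  // Fields every container type persists; subclasses must not mutate this.
  protected static final List<String> YAML_FIELDS =
      Collections.unmodifiableList(Lists.newArrayList("containerID", "checksum"));
}

class KeyValue extends Base {
  private static final List<String> KV_YAML_FIELDS;
  static {
    // Copy the shared fields, then append the KeyValue-specific ones.
    KV_YAML_FIELDS = Lists.newArrayList();
    KV_YAML_FIELDS.addAll(YAML_FIELDS);
    KV_YAML_FIELDS.add("metadataPath");
    KV_YAML_FIELDS.add("chunksPath");
  }
  static List<String> getYamlFields() {
    // Hand out a read-only view so callers cannot change the schema.
    return Collections.unmodifiableList(KV_YAML_FIELDS);
  }
}
```

Keeping the common list in one place matters here: a field present in the dump but absent from the representer's list would change the serialized bytes and therefore the checksum.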
* @param containerMetaDataPath - * @param containerName + * @param containerID * @return .container File name */ - public static File getContainerFile(File containerMetaDataPath, String - containerName) { + public static File getContainerFile(File containerMetaDataPath, + long containerID) { Preconditions.checkNotNull(containerMetaDataPath); - Preconditions.checkNotNull(containerName); - return new File(containerMetaDataPath, containerName + + return new File(containerMetaDataPath, containerID + OzoneConsts.CONTAINER_EXTENSION); } /** * Return containerDB File. * @param containerMetaDataPath - * @param containerName + * @param containerID * @return containerDB File name */ - public static File getContainerDBFile(File containerMetaDataPath, String - containerName) { + public static File getContainerDBFile(File containerMetaDataPath, + long containerID) { Preconditions.checkNotNull(containerMetaDataPath); - Preconditions.checkNotNull(containerName); - return new File(containerMetaDataPath, containerName + OzoneConsts + return new File(containerMetaDataPath, containerID + OzoneConsts .DN_CONTAINER_DB); } - - /** - * Returns container checksum file. - * @param containerMetaDataPath - * @param containerName - * @return container checksum file - */ - public static File getContainerCheckSumFile(File containerMetaDataPath, - String containerName) { - Preconditions.checkNotNull(containerMetaDataPath); - Preconditions.checkNotNull(containerName); - return new File(containerMetaDataPath, containerName + OzoneConsts - .CONTAINER_FILE_CHECKSUM_EXTENSION); - } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 185611116a4..2352cf64465 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -18,23 +18,16 @@ package org.apache.hadoop.ozone.container.keyvalue.helpers; import com.google.common.base.Preconditions; -import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.helpers.KeyData; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataStore; @@ -43,18 +36,12 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import 
java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Map; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*; - /** * Class which defines utility methods for KeyValueContainer. */ @@ -77,10 +64,8 @@ private KeyValueContainerUtil() { * @throws IOException */ public static void createContainerMetaData(File containerMetaDataPath, File - chunksPath, File dbFile, String containerName, Configuration conf) throws - IOException { + chunksPath, File dbFile, Configuration conf) throws IOException { Preconditions.checkNotNull(containerMetaDataPath); - Preconditions.checkNotNull(containerName); Preconditions.checkNotNull(conf); if (!containerMetaDataPath.mkdirs()) { @@ -165,107 +150,32 @@ public static ContainerCommandResponseProto getReadContainerResponse( return builder.build(); } - /** - * Compute checksum of the .container file. - * @param containerId - * @param containerFile - * @throws StorageContainerException - */ - public static String computeCheckSum(long containerId, File - containerFile) throws StorageContainerException { - Preconditions.checkNotNull(containerFile, "containerFile cannot be null"); - MessageDigest sha; - FileInputStream containerFileStream = null; - try { - sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - } catch (NoSuchAlgorithmException e) { - throw new StorageContainerException("Unable to create Message Digest, " + - "usually this is a java configuration issue.", NO_SUCH_ALGORITHM); - } - try { - containerFileStream = new FileInputStream(containerFile); - byte[] byteArray = new byte[1024]; - int bytesCount = 0; - while ((bytesCount = containerFileStream.read(byteArray)) != -1) { - sha.update(byteArray, 0, bytesCount); - } - String checksum = DigestUtils.sha256Hex(sha.digest()); - return checksum; - } catch (IOException ex) { - throw new StorageContainerException("Error during computing checksum: " + - "for container " + containerId, ex, CONTAINER_CHECKSUM_ERROR); - } finally { - IOUtils.closeStream(containerFileStream); - } - } - - /** - * Verify checksum of the container. - * @param containerId - * @param checksumFile - * @param checksum - * @throws StorageContainerException - */ - public static void verifyCheckSum(long containerId, File checksumFile, - String checksum) - throws StorageContainerException { - try { - Preconditions.checkNotNull(checksum); - Preconditions.checkNotNull(checksumFile); - Path path = Paths.get(checksumFile.getAbsolutePath()); - List fileCheckSum = Files.readAllLines(path); - Preconditions.checkState(fileCheckSum.size() == 1, "checksum " + - "should be 32 byte string"); - if (!checksum.equals(fileCheckSum.get(0))) { - LOG.error("Checksum mismatch for the container {}", containerId); - throw new StorageContainerException("Checksum mismatch for " + - "the container " + containerId, CHECKSUM_MISMATCH); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Error during verify checksum for container {}", containerId); - throw new StorageContainerException("Error during verify checksum" + - " for container " + containerId, IO_EXCEPTION); - } - } - /** * Parse KeyValueContainerData and verify checksum. 
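The replacement load path — assembled from parseKVContainerData below and the ContainerReader changes that follow — verifies the embedded checksum instead of recomputing a sidecar digest over the file bytes. A condensed, hedged sketch of the call order (volume wiring and error handling trimmed; parseKVContainerData performs the verifyChecksum call internally):

```java
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;

final class LoadSketch {
  static void load(File containerFile, long expectedID,
      OzoneConfiguration config, ContainerSet containerSet) throws IOException {
    KeyValueContainerData data = (KeyValueContainerData)
        ContainerDataYaml.readContainerFile(containerFile);
    if (expectedID != data.getContainerID()) {
      return; // directory name and file content disagree: skip this container
    }
    // Verifies the stored checksum against a recomputed one, then attaches
    // the container DB file (logging and skipping if the DB is missing).
    KeyValueContainerUtil.parseKVContainerData(data, config);
    containerSet.addContainer(new KeyValueContainer(data, config));
  }
}
```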
- * @param containerData - * @param containerFile - * @param checksumFile - * @param dbFile + * @param kvContainerData * @param config * @throws IOException */ - public static void parseKeyValueContainerData( - KeyValueContainerData containerData, File containerFile, File - checksumFile, File dbFile, OzoneConfiguration config) throws IOException { + public static void parseKVContainerData(KeyValueContainerData kvContainerData, + OzoneConfiguration config) throws IOException { - Preconditions.checkNotNull(containerData, "containerData cannot be null"); - Preconditions.checkNotNull(containerFile, "containerFile cannot be null"); - Preconditions.checkNotNull(checksumFile, "checksumFile cannot be null"); - Preconditions.checkNotNull(dbFile, "dbFile cannot be null"); - Preconditions.checkNotNull(config, "ozone config cannot be null"); - - long containerId = containerData.getContainerID(); - String containerName = String.valueOf(containerId); - File metadataPath = new File(containerData.getMetadataPath()); - - Preconditions.checkNotNull(containerName, "container Name cannot be " + - "null"); - Preconditions.checkNotNull(metadataPath, "metadata path cannot be " + - "null"); + long containerID = kvContainerData.getContainerID(); + File metadataPath = new File(kvContainerData.getMetadataPath()); // Verify Checksum - String checksum = KeyValueContainerUtil.computeCheckSum( - containerData.getContainerID(), containerFile); - KeyValueContainerUtil.verifyCheckSum(containerId, checksumFile, checksum); + ContainerUtils.verifyChecksum(kvContainerData); - containerData.setDbFile(dbFile); + File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( + metadataPath, containerID); + if (!dbFile.exists()) { + LOG.error("Container DB file is missing for ContainerID {}. " + + "Skipping loading of this container.", containerID); + // Don't further process this container, as it is missing db file. + return; + } + kvContainerData.setDbFile(dbFile); - MetadataStore metadata = KeyUtils.getDB(containerData, config); + MetadataStore metadata = KeyUtils.getDB(kvContainerData, config); long bytesUsed = 0; List> liveKeys = metadata .getRangeKVs(null, Integer.MAX_VALUE, @@ -279,8 +189,8 @@ public static void parseKeyValueContainerData( return 0L; } }).sum(); - containerData.setBytesUsed(bytesUsed); - containerData.setKeyCount(liveKeys.size()); + kvContainerData.setBytesUsed(bytesUsed); + kvContainerData.setKeyCount(liveKeys.size()); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index c1595b241e1..dc33f2e2fcb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -20,6 +20,9 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.ozone.container.common.impl.ContainerData; @@ -38,7 +41,6 @@ import java.io.FileFilter; import java.io.IOException; - /** * Class used to read .container files from Volume and build container map. 
* @@ -46,15 +48,19 @@ * * ../hdds/VERSION * ../hdds/<>/current/<>/</metadata/<>.container - * ../hdds/<>/current/<>/</metadata/<>.checksum - * ../hdds/<>/current/<>/</metadata/<>.db * ../hdds/<>/current/<>/</<> * + * Some ContainerTypes will have extra metadata other than the .container + * file. For example, KeyValueContainer will have a .db file. This .db file + * will also be stored in the metadata folder along with the .container file. + * + * ../hdds/<>/current/<>/</metadata/<>.db + * * Note that the <> is dependent on the ContainerType. * For KeyValueContainers, the data is stored in a "chunks" folder. As such, * the <> layout for KeyValueContainers is * - * ../hdds/<>/current/<>/</chunks/<> + * ../hdds/<>/current/<>/</chunks/<> * */ public class ContainerReader implements Runnable { @@ -124,22 +130,19 @@ public boolean accept(File pathname) { for (File containerDir : containerDirs) { File metadataPath = new File(containerDir + File.separator + OzoneConsts.CONTAINER_META_PATH); - String containerName = containerDir.getName(); + long containerID = Long.parseLong(containerDir.getName()); if (metadataPath.exists()) { File containerFile = KeyValueContainerLocationUtil - .getContainerFile(metadataPath, containerName); - File checksumFile = KeyValueContainerLocationUtil - .getContainerCheckSumFile(metadataPath, containerName); - if (containerFile.exists() && checksumFile.exists()) { - verifyContainerFile(containerName, containerFile, - checksumFile); + .getContainerFile(metadataPath, containerID); + if (containerFile.exists()) { + verifyContainerFile(containerID, containerFile); } else { - LOG.error("Missing container metadata files for " + - "Container: {}", containerName); + LOG.error("Missing .container file for ContainerID: {}", + containerID); } } else { LOG.error("Missing container metadata directory for " + - "Container: {}", containerName); + "ContainerID: {}", containerID); } } } @@ -149,39 +152,46 @@ public boolean accept(File pathname) { } } - private void verifyContainerFile(String containerName, File containerFile, - File checksumFile) { + private void verifyContainerFile(long containerID, File containerFile) { try { - ContainerData containerData = ContainerDataYaml.readContainerFile( + ContainerData containerData = ContainerDataYaml.readContainerFile( containerFile); - - switch (containerData.getContainerType()) { - case KeyValueContainer: - KeyValueContainerData keyValueContainerData = (KeyValueContainerData) - containerData; - containerData.setVolume(hddsVolume); - File dbFile = KeyValueContainerLocationUtil - .getContainerDBFile(new File(containerFile.getParent()), - containerName); - if (!dbFile.exists()) { - LOG.error("Container DB file is missing for Container {}, skipping " + - "this", containerName); - // Don't further process this container, as it is missing db file. - return; - } - KeyValueContainerUtil.parseKeyValueContainerData(keyValueContainerData, - containerFile, checksumFile, dbFile, config); - KeyValueContainer keyValueContainer = new KeyValueContainer( - keyValueContainerData, config); - containerSet.addContainer(keyValueContainer); - break; - default: - LOG.error("Unrecognized ContainerType {} format during verify " + - "ContainerFile", containerData.getContainerType()); + if (containerID != containerData.getContainerID()) { + LOG.error("Invalid ContainerID in file {}. 
" + + "Skipping loading of this container.", containerFile); + return; } + verifyContainerData(containerData); } catch (IOException ex) { - LOG.error("Error during reading container file {}", containerFile); + LOG.error("Failed to parse ContainerFile for ContainerID: {}", + containerID, ex); } } + public void verifyContainerData(ContainerData containerData) + throws IOException { + switch (containerData.getContainerType()) { + case KeyValueContainer: + if (containerData instanceof KeyValueContainerData) { + KeyValueContainerData kvContainerData = (KeyValueContainerData) + containerData; + containerData.setVolume(hddsVolume); + + KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); + KeyValueContainer kvContainer = new KeyValueContainer( + kvContainerData, config); + containerSet.addContainer(kvContainer); + } else { + throw new StorageContainerException("Container File is corrupted. " + + "ContainerType is KeyValueContainer but cast to " + + "KeyValueContainerData failed. ", + ContainerProtos.Result.CONTAINER_METADATA_ERROR); + } + break; + default: + throw new StorageContainerException("Unrecognized ContainerType " + + containerData.getContainerType(), + ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE); + } + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index d734271f8a6..fd51db3d9b9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Test; @@ -37,39 +38,58 @@ */ public class TestContainerDataYaml { - private static final int MAXSIZE = 5; - @Test - public void testCreateContainerFile() throws IOException { - String path = new FileSystemTestHelper().getTestRootDir(); - String containerPath = "1.container"; + private static long testContainerID = 1234; - File filePath = new File(new FileSystemTestHelper().getTestRootDir()); - filePath.mkdirs(); + private static String testRoot = new FileSystemTestHelper().getTestRootDir(); + + private static final int MAXSIZE = 5; + + /** + * Creates a .container file. cleanup() should be called at the end of the + * test when container file is created. 
+ */ + private File createContainerFile(long containerID) throws IOException { + new File(testRoot).mkdirs(); + + String containerPath = containerID + ".container"; KeyValueContainerData keyValueContainerData = new KeyValueContainerData( - Long.MAX_VALUE, MAXSIZE); + containerID, MAXSIZE); keyValueContainerData.setContainerDBType("RocksDB"); - keyValueContainerData.setMetadataPath(path); - keyValueContainerData.setChunksPath(path); + keyValueContainerData.setMetadataPath(testRoot); + keyValueContainerData.setChunksPath(testRoot); - File containerFile = new File(filePath, containerPath); + File containerFile = new File(testRoot, containerPath); // Create .container file with ContainerData ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, containerFile, keyValueContainerData); + .KeyValueContainer, keyValueContainerData, containerFile); //Check .container file exists or not. assertTrue(containerFile.exists()); + return containerFile; + } + + private void cleanup() { + FileUtil.fullyDelete(new File(testRoot)); + } + + @Test + public void testCreateContainerFile() throws IOException { + long containerID = testContainerID++; + + File containerFile = createContainerFile(containerID); + // Read from .container file, and verify data. KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(containerFile); - assertEquals(Long.MAX_VALUE, kvData.getContainerID()); + assertEquals(containerID, kvData.getContainerID()); assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData .getContainerType()); assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(path, kvData.getMetadataPath()); - assertEquals(path, kvData.getChunksPath()); + assertEquals(containerFile.getParent(), kvData.getMetadataPath()); + assertEquals(containerFile.getParent(), kvData.getChunksPath()); assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData .getState()); assertEquals(1, kvData.getLayOutVersion()); @@ -82,22 +102,20 @@ public void testCreateContainerFile() throws IOException { kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED); - // Update .container file with new ContainerData. - containerFile = new File(filePath, containerPath); ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, containerFile, kvData); + .KeyValueContainer, kvData, containerFile); // Reading newly updated data from .container file kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile( containerFile); // verify data. 
- assertEquals(Long.MAX_VALUE, kvData.getContainerID()); + assertEquals(containerID, kvData.getContainerID()); assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData .getContainerType()); assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(path, kvData.getMetadataPath()); - assertEquals(path, kvData.getChunksPath()); + assertEquals(containerFile.getParent(), kvData.getMetadataPath()); + assertEquals(containerFile.getParent(), kvData.getChunksPath()); assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData .getState()); assertEquals(1, kvData.getLayOutVersion()); @@ -105,19 +123,15 @@ public void testCreateContainerFile() throws IOException { assertEquals("hdfs", kvData.getMetadata().get("VOLUME")); assertEquals("ozone", kvData.getMetadata().get("OWNER")); assertEquals(MAXSIZE, kvData.getMaxSizeGB()); - - FileUtil.fullyDelete(filePath); - - } @Test public void testIncorrectContainerFile() throws IOException{ try { - String path = "incorrect.container"; + String containerFile = "incorrect.container"; //Get file from resources folder ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(path).getFile()); + File file = new File(classLoader.getResource(containerFile).getFile()); KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(file); fail("testIncorrectContainerFile failed"); @@ -137,12 +151,13 @@ public void testCheckBackWardCompatabilityOfContainerFile() throws // created or not. try { - String path = "additionalfields.container"; + String containerFile = "additionalfields.container"; //Get file from resources folder ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(path).getFile()); + File file = new File(classLoader.getResource(containerFile).getFile()); KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(file); + ContainerUtils.verifyChecksum(kvData); //Checking the Container file data is consistent or not assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData @@ -159,9 +174,45 @@ public void testCheckBackWardCompatabilityOfContainerFile() throws assertEquals(2, kvData.getMetadata().size()); } catch (Exception ex) { + ex.printStackTrace(); fail("testCheckBackWardCompatabilityOfContainerFile failed"); } } + /** + * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. + */ + @Test + public void testChecksumInContainerFile() throws IOException { + long containerID = testContainerID++; + File containerFile = createContainerFile(containerID); + + // Read from .container file, and verify data. + KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml + .readContainerFile(containerFile); + ContainerUtils.verifyChecksum(kvData); + + cleanup(); + } + + /** + * Test to verify incorrect checksum is detected. 
+ */ + @Test + public void testIncorrectChecksum() { + try { + String containerFile = "incorrect.checksum.container"; + //Get file from resources folder + ClassLoader classLoader = getClass().getClassLoader(); + File file = new File(classLoader.getResource(containerFile).getFile()); + KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml + .readContainerFile(file); + ContainerUtils.verifyChecksum(kvData); + fail("testIncorrectChecksum failed"); + } catch (Exception ex) { + GenericTestUtils.assertExceptionContains("Container checksum error for " + + "ContainerID:", ex); + } + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 4f00507dd62..2bf41e584f2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -69,8 +69,7 @@ public class TestKeyValueContainer { private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private long containerId = 1L; - private String containerName = String.valueOf(containerId); + private long containerID = 1L; private KeyValueContainerData keyValueContainerData; private KeyValueContainer keyValueContainer; @@ -111,16 +110,12 @@ public void testCreateContainer() throws Exception { assertTrue(chunksPath != null); File containerMetaDataLoc = new File(containerMetaDataPath); - //Check whether container file, check sum file and container db file exists - // or not. + //Check whether container file and container db file exists or not. 
assertTrue(KeyValueContainerLocationUtil.getContainerFile( - containerMetaDataLoc, containerName).exists(), ".Container File does" + + containerMetaDataLoc, containerID).exists(), ".Container File does" + " not exist"); - assertTrue(KeyValueContainerLocationUtil.getContainerCheckSumFile( - containerMetaDataLoc, containerName).exists(), "Container check sum " + - "File does" + " not exist"); assertTrue(KeyValueContainerLocationUtil.getContainerDBFile( - containerMetaDataLoc, containerName).exists(), "Container DB does " + + containerMetaDataLoc, containerID).exists(), "Container DB does " + "not exist"); } @@ -172,10 +167,10 @@ public void testDeleteContainer() throws Exception { assertFalse("Container File still exists", KeyValueContainerLocationUtil.getContainerFile(containerMetaDataLoc, - containerName).exists()); + containerID).exists()); assertFalse("Container DB file still exists", KeyValueContainerLocationUtil.getContainerDBFile(containerMetaDataLoc, - containerName).exists()); + containerID).exists()); } @@ -195,7 +190,7 @@ public void testCloseContainer() throws Exception { .getMetadataPath(); File containerMetaDataLoc = new File(containerMetaDataPath); File containerFile = KeyValueContainerLocationUtil.getContainerFile( - containerMetaDataLoc, containerName); + containerMetaDataLoc, containerID); keyValueContainerData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(containerFile); @@ -236,7 +231,7 @@ public void testUpdateContainer() throws IOException { .getMetadataPath(); File containerMetaDataLoc = new File(containerMetaDataPath); File containerFile = KeyValueContainerLocationUtil.getContainerFile( - containerMetaDataLoc, containerName); + containerMetaDataLoc, containerID); keyValueContainerData = (KeyValueContainerData) ContainerDataYaml .readContainerFile(containerFile); diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container index f437a95409b..73cf5f3f691 100644 --- a/hadoop-hdds/container-service/src/test/resources/additionalfields.container +++ b/hadoop-hdds/container-service/src/test/resources/additionalfields.container @@ -8,4 +8,5 @@ layOutVersion: 1 maxSizeGB: 5 metadata: {OWNER: ozone, VOLUME: hdfs} state: CLOSED -aclEnabled: true \ No newline at end of file +aclEnabled: true +checksum: 1bbff32aeaa8fadc0b80c5c1e0597036e96acd8ae4bddbed188a2162762251a2 \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container b/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container new file mode 100644 index 00000000000..feeeadc21fe --- /dev/null +++ b/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container @@ -0,0 +1,11 @@ +! 
+containerDBType: RocksDB +chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1 +containerID: 9223372036854775807 +containerType: KeyValueContainer +metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1 +layOutVersion: 1 +maxSizeGB: 5 +metadata: {OWNER: ozone, VOLUME: hdfs} +state: OPEN +checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container index 38a8857cd1f..8aeb30c167e 100644 --- a/hadoop-hdds/container-service/src/test/resources/incorrect.container +++ b/hadoop-hdds/container-service/src/test/resources/incorrect.container @@ -7,4 +7,5 @@ metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 layOutVersion: 1 maxSizeGB: 5 metadata: {OWNER: ozone, VOLUME: hdfs} -state: INVALID \ No newline at end of file +state: INVALID +checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f \ No newline at end of file diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 09fac338df0..f655c2fffcc 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -104,6 +104,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/main/resources/webapps/static/nvd3-1.8.5.min.js.map src/test/resources/incorrect.container src/test/resources/additionalfields.container + src/test/resources/incorrect.checksum.container diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index d29937e0bcc..c2e16459722 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -716,7 +716,7 @@ public void testUpdateContainer() throws IOException { File orgContainerFile = KeyValueContainerLocationUtil.getContainerFile( new File(container.getContainerData().getMetadataPath()), - String.valueOf(testContainerID)); + testContainerID); Assert.assertTrue(orgContainerFile.exists()); Map newMetadata = Maps.newHashMap(); @@ -740,7 +740,7 @@ public void testUpdateContainer() throws IOException { // Verify container data on disk File newContainerFile = KeyValueContainerLocationUtil.getContainerFile( new File(actualNewData.getMetadataPath()), - String.valueOf(testContainerID)); + testContainerID); Assert.assertTrue("Container file should exist.", newContainerFile.exists()); Assert.assertEquals("Container file should be in same location.", @@ -780,8 +780,8 @@ public void testUpdateContainer() throws IOException { // Update a non-existing container exception.expect(StorageContainerException.class); - exception.expectMessage("Container is an Inconsistent state, missing " + - "required files(.container, .chksm)."); + exception.expectMessage("Container is an Inconsistent " + + "state, missing .container file."); Container nonExistentContainer = new KeyValueContainer( new KeyValueContainerData(RandomUtils.nextLong(), ContainerTestHelper.CONTAINER_MAX_SIZE_GB), conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 
152228331fa..d271ed39665 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -33,11 +33,14 @@ import org.junit.Assert; import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TemporaryFolder; import org.junit.rules.Timeout; import java.util.*; import java.util.concurrent.CompletableFuture; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; + /** * Tests ozone containers. */ @@ -48,6 +51,9 @@ public class TestOzoneContainer { @Rule public Timeout testTimeout = new Timeout(300000); + @Rule + public TemporaryFolder tempFolder = new TemporaryFolder(); + @Test public void testCreateOzoneContainer() throws Exception { long containerID = ContainerTestHelper.getTestContainerID(); @@ -60,6 +66,7 @@ public void testCreateOzoneContainer() throws Exception { // We don't start Ozone Container via data node, we will do it // independently in our test path. Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(); + conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath()); conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
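Taken together, the patch makes a container file self-verifying: the same roundtrip the new testChecksumInContainerFile exercises can be sketched end to end with the APIs introduced above (paths here are hypothetical):

```java
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

public final class RoundTripSketch {
  public static void main(String[] args) throws IOException {
    KeyValueContainerData data = new KeyValueContainerData(1L, 5);
    data.setMetadataPath("/tmp/kv/metadata");   // hypothetical paths
    data.setChunksPath("/tmp/kv/chunks");
    data.setContainerDBType("RocksDB");

    File file = new File("/tmp/kv/metadata/1.container");
    file.getParentFile().mkdirs();
    // Computes and embeds the checksum, then writes the YAML file.
    ContainerDataYaml.createContainerFile(
        ContainerType.KeyValueContainer, data, file);

    // Reading it back and re-verifying must succeed; any byte flipped in
    // the file now fails with CONTAINER_CHECKSUM_ERROR instead of going
    // unnoticed, exactly what testIncorrectChecksum asserts.
    KeyValueContainerData loaded = (KeyValueContainerData)
        ContainerDataYaml.readContainerFile(file);
    ContainerUtils.verifyChecksum(loaded);
  }
}
```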