HDDS-266. Integrate checksum into .container file. Contributed by Hanisha Koneru.

Nanda kumar committed on 2018-07-25 18:55:26 +05:30
parent 43db0cb518
commit b507f83e15
17 changed files with 431 additions and 362 deletions

OzoneConsts.java

@@ -98,7 +98,6 @@ public final class OzoneConsts {
   public static final String OM_DB_NAME = "om.db";
   public static final String STORAGE_DIR_CHUNKS = "chunks";
-  public static final String CONTAINER_FILE_CHECKSUM_EXTENSION = ".chksm";

   /**
    * Supports Bucket Versioning.
@@ -190,4 +189,5 @@ public final class OzoneConsts {
   public static final String METADATA_PATH = "metadataPath";
   public static final String CHUNKS_PATH = "chunksPath";
   public static final String CONTAINER_DB_TYPE = "containerDBType";
+  public static final String CHECKSUM = "checksum";
 }

ContainerUtils.java

@@ -19,9 +19,11 @@
 package org.apache.hadoop.ozone.container.common.helpers;

 import com.google.common.base.Preconditions;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -31,8 +33,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConsts;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,8 +45,17 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import org.yaml.snakeyaml.Yaml;

 import static org.apache.commons.io.FilenameUtils.removeExtension;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.CONTAINER_CHECKSUM_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.NO_SUCH_ALGORITHM;
+import static org.apache.hadoop.ozone.container.common.impl.ContainerData
+    .CHARSET_ENCODING;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;

 /**
  * A set of helper functions to create proper responses.
@@ -245,4 +256,48 @@ public final class ContainerUtils {
           + path.getAbsolutePath(), e);
     }
   }
+
+  /**
+   * Verify that the checksum stored in containerData is equal to the
+   * computed checksum.
+   * @param containerData
+   * @throws IOException
+   */
+  public static void verifyChecksum(ContainerData containerData)
+      throws IOException {
+    String storedChecksum = containerData.getChecksum();
+
+    Yaml yaml = ContainerDataYaml.getYamlForContainerType(
+        containerData.getContainerType());
+    containerData.computeAndSetChecksum(yaml);
+    String computedChecksum = containerData.getChecksum();
+
+    if (storedChecksum == null || !storedChecksum.equals(computedChecksum)) {
+      throw new StorageContainerException("Container checksum error for " +
+          "ContainerID: " + containerData.getContainerID() + ". " +
+          "\nStored Checksum: " + storedChecksum +
+          "\nExpected Checksum: " + computedChecksum,
+          CONTAINER_CHECKSUM_ERROR);
+    }
+  }
+
+  /**
+   * Return the SHA-256 checksum of the containerData.
+   * @param containerDataYamlStr ContainerData as a Yaml String
+   * @return Checksum of the container data
+   * @throws StorageContainerException
+   */
+  public static String getChecksum(String containerDataYamlStr)
+      throws StorageContainerException {
+    MessageDigest sha;
+    try {
+      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
+      sha.update(containerDataYamlStr.getBytes(CHARSET_ENCODING));
+      return DigestUtils.sha256Hex(sha.digest());
+    } catch (NoSuchAlgorithmException e) {
+      throw new StorageContainerException("Unable to create Message Digest, " +
+          "usually this is a java configuration issue.", NO_SUCH_ALGORITHM);
+    }
+  }
 }
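Note: taken together, the two helpers above replace the old external .chksm flow; the checksum is computed over the container's YAML serialization and verified against the value stored inside the .container file itself. A minimal sketch of that round trip, assuming a populated KeyValueContainerData instance named `data` and the imports used in this file:

    // Sketch only: compute-and-store, then verify, using methods from this
    // patch. `data` is assumed to be a populated KeyValueContainerData.
    Yaml yaml = ContainerDataYaml.getYamlForContainerType(
        data.getContainerType());
    data.computeAndSetChecksum(yaml);     // zeroes the field, hashes the dump
    String stored = data.getChecksum();   // 64-char SHA-256 hex digest

    // On reload, verifyChecksum() repeats the computation and throws
    // CONTAINER_CHECKSUM_ERROR on mismatch.
    ContainerUtils.verifyChecksum(data);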

ContainerData.java

@@ -18,22 +18,33 @@
 package org.apache.hadoop.ozone.container.common.impl;

 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.nio.charset.Charset;
 import java.util.List;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerLifeCycleState;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

-import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import org.yaml.snakeyaml.Yaml;

 import static java.lang.Math.max;
+import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
+import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
+import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB;
+import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
+import static org.apache.hadoop.ozone.OzoneConsts.STATE;

 /**
  * ContainerData is the in-memory representation of container metadata and is
@@ -72,6 +83,23 @@ public abstract class ContainerData {

   private long deleteTransactionId;

+  private String checksum;
+  public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8");
+  private static final String DUMMY_CHECKSUM = new String(new byte[64],
+      CHARSET_ENCODING);
+
+  // Common Fields need to be stored in .container file.
+  protected static final List<String> YAML_FIELDS =
+      Collections.unmodifiableList(Lists.newArrayList(
+          CONTAINER_TYPE,
+          CONTAINER_ID,
+          LAYOUTVERSION,
+          STATE,
+          METADATA,
+          MAX_SIZE_GB,
+          CHECKSUM));
+
   /**
    * Number of pending deletion blocks in container.
    */
@@ -113,6 +141,7 @@ public abstract class ContainerData {
     this.maxSizeGB = size;
     this.numPendingDeletionBlocks = new AtomicInteger(0);
     this.deleteTransactionId = 0;
+    setChecksumTo0ByteArray();
   }

   /**
@@ -400,6 +429,41 @@ public abstract class ContainerData {
     return this.numPendingDeletionBlocks.get();
   }

+  public void setChecksumTo0ByteArray() {
+    this.checksum = DUMMY_CHECKSUM;
+  }
+
+  public void setChecksum(String checkSum) {
+    this.checksum = checkSum;
+  }
+
+  public String getChecksum() {
+    return this.checksum;
+  }
+
+  /**
+   * Compute the checksum for ContainerData using the specified Yaml (based
+   * on ContainerType) and set the checksum.
+   *
+   * Checksum of ContainerData is calculated by setting the
+   * {@link ContainerData#checksum} field to a 64-byte array with all 0's -
+   * {@link ContainerData#DUMMY_CHECKSUM}. After the checksum is calculated,
+   * the checksum field is updated with this value.
+   *
+   * @param yaml Yaml for ContainerType to get the ContainerData as Yaml String
+   * @throws IOException
+   */
+  public void computeAndSetChecksum(Yaml yaml) throws IOException {
+    // Set checksum to dummy value - 0 byte array, to calculate the checksum
+    // of rest of the data.
+    setChecksumTo0ByteArray();
+
+    // Dump yaml data into a string to compute its checksum
+    String containerDataYamlStr = yaml.dump(this);
+
+    this.checksum = ContainerUtils.getChecksum(containerDataYamlStr);
+  }
+
   /**
    * Returns a ProtoBuf Message from ContainerData.
    *
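Note: the DUMMY_CHECKSUM placeholder is deliberately the same length as the final digest (a SHA-256 hex string is 64 characters), so the YAML document has an identical shape when the checksum is first computed and when it is later recomputed for verification. A self-contained sketch of the same zero-placeholder trick outside Ozone; names here are illustrative only:

    // Hypothetical standalone illustration of the technique used above.
    String placeholder = new String(new byte[64],
        java.nio.charset.StandardCharsets.UTF_8);
    String body = "containerID: 42\nchecksum: " + placeholder + "\n";
    String digest =
        org.apache.commons.codec.digest.DigestUtils.sha256Hex(body);
    // The file is written with `digest` in place of the placeholder; a
    // verifier substitutes the placeholder back and recomputes the hash.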

ContainerDataYaml.java

@@ -20,10 +20,14 @@ package org.apache.hadoop.ozone.container.common.impl;

 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerType;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.yaml.snakeyaml.Yaml;

 import java.beans.IntrospectionException;
@@ -59,9 +63,13 @@ import static org.apache.hadoop.ozone.container.keyvalue

 public final class ContainerDataYaml {

+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerDataYaml.class);
+
   private ContainerDataYaml() {
   }

   /**
    * Creates a .container file in yaml format.
    *
@@ -69,38 +77,29 @@ public final class ContainerDataYaml {
    * @param containerData
    * @throws IOException
    */
-  public static void createContainerFile(ContainerProtos.ContainerType
-      containerType, File containerFile, ContainerData containerData) throws
-      IOException {
-
-    Preconditions.checkNotNull(containerFile, "yamlFile cannot be null");
-    Preconditions.checkNotNull(containerData, "containerData cannot be null");
-    Preconditions.checkNotNull(containerType, "containerType cannot be null");
-
-    PropertyUtils propertyUtils = new PropertyUtils();
-    propertyUtils.setBeanAccess(BeanAccess.FIELD);
-    propertyUtils.setAllowReadOnlyProperties(true);
-
-    switch(containerType) {
-    case KeyValueContainer:
-      Representer representer = new ContainerDataRepresenter();
-      representer.setPropertyUtils(propertyUtils);
-      representer.addClassTag(KeyValueContainerData.class,
-          KeyValueContainerData.KEYVALUE_YAML_TAG);
-
-      Constructor keyValueDataConstructor = new ContainerDataConstructor();
-
-      Yaml yaml = new Yaml(keyValueDataConstructor, representer);
-      Writer writer = new OutputStreamWriter(new FileOutputStream(
-          containerFile), "UTF-8");
-      yaml.dump(containerData, writer);
-      writer.close();
-      break;
-    default:
-      throw new StorageContainerException("Unrecognized container Type " +
-          "format " + containerType, ContainerProtos.Result
-          .UNKNOWN_CONTAINER_TYPE);
-    }
+  public static void createContainerFile(ContainerType containerType,
+      ContainerData containerData, File containerFile) throws IOException {
+    Writer writer = null;
+    try {
+      // Create Yaml for given container type
+      Yaml yaml = getYamlForContainerType(containerType);
+      // Compute Checksum and update ContainerData
+      containerData.computeAndSetChecksum(yaml);
+
+      // Write the ContainerData with checksum to Yaml file.
+      writer = new OutputStreamWriter(new FileOutputStream(
+          containerFile), "UTF-8");
+      yaml.dump(containerData, writer);
+
+    } finally {
+      try {
+        if (writer != null) {
+          writer.close();
+        }
+      } catch (IOException ex) {
+        LOG.warn("Error occurred during closing the writer. ContainerID: " +
+            containerData.getContainerID());
+      }
+    }
   }

@@ -140,6 +139,39 @@ public final class ContainerDataYaml {
     return containerData;
   }

+  /**
+   * Given a ContainerType this method returns a Yaml representation of
+   * the container properties.
+   *
+   * @param containerType type of container
+   * @return Yaml representation of container properties
+   *
+   * @throws StorageContainerException if the type is unrecognized
+   */
+  public static Yaml getYamlForContainerType(ContainerType containerType)
+      throws StorageContainerException {
+    PropertyUtils propertyUtils = new PropertyUtils();
+    propertyUtils.setBeanAccess(BeanAccess.FIELD);
+    propertyUtils.setAllowReadOnlyProperties(true);
+
+    switch (containerType) {
+    case KeyValueContainer:
+      Representer representer = new ContainerDataRepresenter();
+      representer.setPropertyUtils(propertyUtils);
+      representer.addClassTag(
+          KeyValueContainerData.class,
+          KeyValueContainerData.KEYVALUE_YAML_TAG);
+      Constructor keyValueDataConstructor = new ContainerDataConstructor();
+      return new Yaml(keyValueDataConstructor, representer);
+    default:
+      throw new StorageContainerException("Unrecognized container Type " +
+          "format " + containerType, ContainerProtos.Result
+          .UNKNOWN_CONTAINER_TYPE);
+    }
+  }
+
   /**
    * Representer class to define which fields need to be stored in yaml file.
    */
@@ -192,8 +224,9 @@ public final class ContainerDataYaml {
       int maxSize = (int) size;

       //When a new field is added, it needs to be added here.
-      KeyValueContainerData kvData = new KeyValueContainerData((long) nodes
-          .get(OzoneConsts.CONTAINER_ID), lv, maxSize);
+      KeyValueContainerData kvData = new KeyValueContainerData(
+          (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, maxSize);
       kvData.setContainerDBType((String)nodes.get(
           OzoneConsts.CONTAINER_DB_TYPE));
       kvData.setMetadataPath((String) nodes.get(
@@ -201,6 +234,7 @@ public final class ContainerDataYaml {
       kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH));
       Map<String, String> meta = (Map) nodes.get(OzoneConsts.METADATA);
       kvData.setMetadata(meta);
+      kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM));
       String state = (String) nodes.get(OzoneConsts.STATE);
       switch (state) {
       case "OPEN":
@@ -215,7 +249,7 @@ public final class ContainerDataYaml {
       default:
         throw new IllegalStateException("Unexpected " +
             "ContainerLifeCycleState " + state + " for the containerId " +
-            (long) nodes.get(OzoneConsts.CONTAINER_ID));
+            nodes.get(OzoneConsts.CONTAINER_ID));
       }
       return kvData;
     }
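Note: the refactored createContainerFile() now takes the ContainerData before the destination file. A hedged usage sketch; the paths below are illustrative, not from this patch:

    // Sketch: create and read back a .container file with the new API.
    KeyValueContainerData data = new KeyValueContainerData(1L, 5);
    data.setContainerDBType("RocksDB");
    data.setMetadataPath("/tmp/meta");    // illustrative paths
    data.setChunksPath("/tmp/chunks");
    File containerFile = new File("/tmp/meta/1.container");

    ContainerDataYaml.createContainerFile(
        ContainerProtos.ContainerType.KeyValueContainer, data, containerFile);
    ContainerData roundTrip =
        ContainerDataYaml.readContainerFile(containerFile);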

KeyValueContainer.java

@@ -24,12 +24,12 @@ import java.nio.file.StandardCopyOption;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerType;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -49,10 +49,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

@@ -113,26 +110,24 @@ public class KeyValueContainer implements Container {
           .getVolumesList(), maxSize);
       String hddsVolumeDir = containerVolume.getHddsRootDir().toString();

-      long containerId = containerData.getContainerID();
-      String containerName = Long.toString(containerId);
+      long containerID = containerData.getContainerID();

       containerMetaDataPath = KeyValueContainerLocationUtil
-          .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId);
+          .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID);
       File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-          hddsVolumeDir, scmId, containerId);
+          hddsVolumeDir, scmId, containerID);
       File containerFile = KeyValueContainerLocationUtil.getContainerFile(
-          containerMetaDataPath, containerName);
-      File containerCheckSumFile = KeyValueContainerLocationUtil
-          .getContainerCheckSumFile(containerMetaDataPath, containerName);
+          containerMetaDataPath, containerID);
       File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
-          containerMetaDataPath, containerName);
+          containerMetaDataPath, containerID);

       // Check if it is new Container.
       ContainerUtils.verifyIsNewContainer(containerMetaDataPath);

       //Create Metadata path chunks path and metadata db
       KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath,
-          chunksPath, dbFile, containerName, config);
+          chunksPath, dbFile, config);

       String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
           OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
@@ -144,9 +139,8 @@ public class KeyValueContainer implements Container {
       containerData.setDbFile(dbFile);
       containerData.setVolume(containerVolume);

-      // Create .container file and .chksm file
-      writeToContainerFile(containerFile, containerCheckSumFile, true);
+      // Create .container file
+      writeToContainerFile(containerFile, true);

     } catch (StorageContainerException ex) {
       if (containerMetaDataPath != null && containerMetaDataPath.getParentFile()
@@ -176,97 +170,64 @@ public class KeyValueContainer implements Container {
    * Creates .container file and checksum file.
    *
    * @param containerFile
-   * @param checksumFile
    * @param isCreate true if we are creating a new container file and false if
    *                we are updating an existing container file.
    * @throws StorageContainerException
    */
-  private void writeToContainerFile(File containerFile, File
-      checksumFile, boolean isCreate)
+  private void writeToContainerFile(File containerFile, boolean isCreate)
       throws StorageContainerException {
     File tempContainerFile = null;
-    File tempChecksumFile = null;
-    FileOutputStream containerCheckSumStream = null;
-    Writer writer = null;
     long containerId = containerData.getContainerID();
     try {
       tempContainerFile = createTempFile(containerFile);
-      tempChecksumFile = createTempFile(checksumFile);
-      ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
-          .KeyValueContainer, tempContainerFile, containerData);
-
-      //Compute Checksum for container file
-      String checksum = KeyValueContainerUtil.computeCheckSum(containerId,
-          tempContainerFile);
-      containerCheckSumStream = new FileOutputStream(tempChecksumFile);
-      writer = new OutputStreamWriter(containerCheckSumStream, "UTF-8");
-      writer.write(checksum);
-      writer.flush();
+      ContainerDataYaml.createContainerFile(
+          ContainerType.KeyValueContainer, containerData, tempContainerFile);

       if (isCreate) {
         // When creating a new container, .container file should not exist
         // already.
         NativeIO.renameTo(tempContainerFile, containerFile);
-        NativeIO.renameTo(tempChecksumFile, checksumFile);
       } else {
         // When updating a container, the .container file should exist. If
         // not, the container is in an inconsistent state.
         Files.move(tempContainerFile.toPath(), containerFile.toPath(),
             StandardCopyOption.REPLACE_EXISTING);
-        Files.move(tempChecksumFile.toPath(), checksumFile.toPath(),
-            StandardCopyOption.REPLACE_EXISTING);
       }
-
     } catch (IOException ex) {
       throw new StorageContainerException("Error during creation of " +
-          "required files(.container, .chksm) for container. ContainerID: "
-          + containerId, ex, CONTAINER_FILES_CREATE_ERROR);
+          ".container file. ContainerID: " + containerId, ex,
+          CONTAINER_FILES_CREATE_ERROR);
     } finally {
-      IOUtils.closeStream(containerCheckSumStream);
       if (tempContainerFile != null && tempContainerFile.exists()) {
         if (!tempContainerFile.delete()) {
           LOG.warn("Unable to delete container temporary file: {}.",
               tempContainerFile.getAbsolutePath());
         }
       }
-      if (tempChecksumFile != null && tempChecksumFile.exists()) {
-        if (!tempChecksumFile.delete()) {
-          LOG.warn("Unable to delete container temporary checksum file: {}.",
-              tempContainerFile.getAbsolutePath());
-        }
-      }
-      try {
-        if (writer != null) {
-          writer.close();
-        }
-      } catch (IOException ex) {
-        LOG.warn("Error occurred during closing the writer. Container " +
-            "Name:" + containerId);
-      }
     }
   }

-  private void updateContainerFile(File containerFile, File
-      checksumFile) throws StorageContainerException {
+  private void updateContainerFile(File containerFile)
+      throws StorageContainerException {

     long containerId = containerData.getContainerID();

-    if (containerFile.exists() && checksumFile.exists()) {
-      try {
-        writeToContainerFile(containerFile, checksumFile, false);
-      } catch (IOException e) {
-        //TODO : Container update failure is not handled currently. Might
-        // lead to loss of .container file. When Update container feature
-        // support is added, this failure should also be handled.
-        throw new StorageContainerException("Container update failed. " +
-            "ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR);
-      }
-    } else {
+    if (!containerFile.exists()) {
       throw new StorageContainerException("Container is an Inconsistent " +
-          "state, missing required files(.container, .chksm). ContainerID: " +
-          containerId, INVALID_CONTAINER_STATE);
+          "state, missing .container file. ContainerID: " + containerId,
+          INVALID_CONTAINER_STATE);
+    }
+
+    try {
+      writeToContainerFile(containerFile, false);
+    } catch (IOException e) {
+      //TODO : Container update failure is not handled currently. Might
+      // lead to loss of .container file. When Update container feature
+      // support is added, this failure should also be handled.
+      throw new StorageContainerException("Container update failed. " +
+          "ContainerID: " + containerId, CONTAINER_FILES_CREATE_ERROR);
     }
   }

@@ -305,10 +266,9 @@ public class KeyValueContainer implements Container {
       }
       containerData.closeContainer();
       File containerFile = getContainerFile();
-      File containerCheckSumFile = getContainerCheckSumFile();

       // update the new container data to .container File
-      updateContainerFile(containerFile, containerCheckSumFile);
+      updateContainerFile(containerFile);

     } catch (StorageContainerException ex) {
       throw ex;
@@ -340,8 +300,8 @@ public class KeyValueContainer implements Container {
   }

   @Override
-  public ContainerProtos.ContainerType getContainerType() {
-    return ContainerProtos.ContainerType.KeyValueContainer;
+  public ContainerType getContainerType() {
+    return ContainerType.KeyValueContainer;
   }

   @Override
@@ -369,10 +329,10 @@ public class KeyValueContainer implements Container {
       for (Map.Entry<String, String> entry : metadata.entrySet()) {
         containerData.addMetadata(entry.getKey(), entry.getValue());
       }

       File containerFile = getContainerFile();
-      File containerCheckSumFile = getContainerCheckSumFile();

       // update the new container data to .container File
-      updateContainerFile(containerFile, containerCheckSumFile);
+      updateContainerFile(containerFile);

     } catch (StorageContainerException ex) {
       // TODO:
       // On error, reset the metadata.
@@ -460,15 +420,6 @@ public class KeyValueContainer implements Container {
         .getContainerID() + OzoneConsts.CONTAINER_EXTENSION);
   }

-  /**
-   * Returns container checksum file.
-   * @return container checksum file
-   */
-  private File getContainerCheckSumFile() {
-    return new File(containerData.getMetadataPath(), containerData
-        .getContainerID() + OzoneConsts.CONTAINER_FILE_CHECKSUM_EXTENSION);
-  }
-
   /**
    * Creates a temporary file.
    * @param file

KeyValueContainerData.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
+import java.util.Collections;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -34,13 +35,7 @@ import java.util.Map;

 import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
-import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
-import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB;
-import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
 import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH;
-import static org.apache.hadoop.ozone.OzoneConsts.STATE;

 /**
  * This class represents the KeyValueContainer metadata, which is the
@@ -53,17 +48,7 @@ public class KeyValueContainerData extends ContainerData {
   public static final Tag KEYVALUE_YAML_TAG = new Tag("KeyValueContainerData");

   // Fields need to be stored in .container file.
-  private static final List<String> YAML_FIELDS =
-      Lists.newArrayList(
-          CONTAINER_TYPE,
-          CONTAINER_ID,
-          LAYOUTVERSION,
-          STATE,
-          METADATA,
-          METADATA_PATH,
-          CHUNKS_PATH,
-          CONTAINER_DB_TYPE,
-          MAX_SIZE_GB);
+  private static final List<String> KV_YAML_FIELDS;

   // Path to Container metadata Level DB/RocksDB Store and .container file.
   private String metadataPath;
@@ -76,6 +61,15 @@ public class KeyValueContainerData extends ContainerData {

   private File dbFile = null;

+  static {
+    // Initialize YAML fields
+    KV_YAML_FIELDS = Lists.newArrayList();
+    KV_YAML_FIELDS.addAll(YAML_FIELDS);
+    KV_YAML_FIELDS.add(METADATA_PATH);
+    KV_YAML_FIELDS.add(CHUNKS_PATH);
+    KV_YAML_FIELDS.add(CONTAINER_DB_TYPE);
+  }
+
   /**
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
@@ -210,7 +204,7 @@ public class KeyValueContainerData extends ContainerData {
   }

   public static List<String> getYamlFields() {
-    return YAML_FIELDS;
+    return Collections.unmodifiableList(KV_YAML_FIELDS);
   }

   /**
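Note: moving the common fields (including the new CHECKSUM) into ContainerData.YAML_FIELDS and appending the type-specific ones in a static initializer gives each subclass a single extension point. A hypothetical subclass could follow the same pattern:

    // Hypothetical: how another ContainerData subclass might extend the
    // common field list the same way. Field name is illustrative only.
    private static final List<String> OTHER_YAML_FIELDS;
    static {
      OTHER_YAML_FIELDS = Lists.newArrayList();
      OTHER_YAML_FIELDS.addAll(YAML_FIELDS);    // common fields + checksum
      OTHER_YAML_FIELDS.add("someExtraField");  // illustrative field name
    }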

KeyValueContainerLocationUtil.java

@@ -101,42 +101,26 @@ public final class KeyValueContainerLocationUtil {
   /**
    * Returns containerFile.
    * @param containerMetaDataPath
-   * @param containerName
+   * @param containerID
    * @return .container File name
    */
-  public static File getContainerFile(File containerMetaDataPath, String
-      containerName) {
+  public static File getContainerFile(File containerMetaDataPath,
+      long containerID) {
     Preconditions.checkNotNull(containerMetaDataPath);
-    Preconditions.checkNotNull(containerName);
-    return new File(containerMetaDataPath, containerName +
+    return new File(containerMetaDataPath, containerID +
         OzoneConsts.CONTAINER_EXTENSION);
   }

   /**
    * Return containerDB File.
    * @param containerMetaDataPath
-   * @param containerName
+   * @param containerID
    * @return containerDB File name
    */
-  public static File getContainerDBFile(File containerMetaDataPath, String
-      containerName) {
+  public static File getContainerDBFile(File containerMetaDataPath,
+      long containerID) {
     Preconditions.checkNotNull(containerMetaDataPath);
-    Preconditions.checkNotNull(containerName);
-    return new File(containerMetaDataPath, containerName + OzoneConsts
+    return new File(containerMetaDataPath, containerID + OzoneConsts
         .DN_CONTAINER_DB);
   }
-
-  /**
-   * Returns container checksum file.
-   * @param containerMetaDataPath
-   * @param containerName
-   * @return container checksum file
-   */
-  public static File getContainerCheckSumFile(File containerMetaDataPath,
-      String containerName) {
-    Preconditions.checkNotNull(containerMetaDataPath);
-    Preconditions.checkNotNull(containerName);
-    return new File(containerMetaDataPath, containerName + OzoneConsts
-        .CONTAINER_FILE_CHECKSUM_EXTENSION);
-  }
 }

KeyValueContainerUtil.java

@@ -18,23 +18,16 @@
 package org.apache.hadoop.ozone.container.keyvalue.helpers;

 import com.google.common.base.Preconditions;
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -43,18 +36,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
-import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
 import java.util.List;
 import java.util.Map;

-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;
-
 /**
  * Class which defines utility methods for KeyValueContainer.
  */
@@ -77,10 +64,8 @@ public final class KeyValueContainerUtil {
    * @throws IOException
    */
   public static void createContainerMetaData(File containerMetaDataPath, File
-      chunksPath, File dbFile, String containerName, Configuration conf) throws
-      IOException {
+      chunksPath, File dbFile, Configuration conf) throws IOException {
     Preconditions.checkNotNull(containerMetaDataPath);
-    Preconditions.checkNotNull(containerName);
     Preconditions.checkNotNull(conf);

     if (!containerMetaDataPath.mkdirs()) {
@@ -165,107 +150,32 @@ public final class KeyValueContainerUtil {
     return builder.build();
   }

-  /**
-   * Compute checksum of the .container file.
-   * @param containerId
-   * @param containerFile
-   * @throws StorageContainerException
-   */
-  public static String computeCheckSum(long containerId, File
-      containerFile) throws StorageContainerException {
-    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
-    MessageDigest sha;
-    FileInputStream containerFileStream = null;
-    try {
-      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-    } catch (NoSuchAlgorithmException e) {
-      throw new StorageContainerException("Unable to create Message Digest, " +
-          "usually this is a java configuration issue.", NO_SUCH_ALGORITHM);
-    }
-    try {
-      containerFileStream = new FileInputStream(containerFile);
-      byte[] byteArray = new byte[1024];
-      int bytesCount = 0;
-      while ((bytesCount = containerFileStream.read(byteArray)) != -1) {
-        sha.update(byteArray, 0, bytesCount);
-      }
-      String checksum = DigestUtils.sha256Hex(sha.digest());
-      return checksum;
-    } catch (IOException ex) {
-      throw new StorageContainerException("Error during computing checksum: " +
-          "for container " + containerId, ex, CONTAINER_CHECKSUM_ERROR);
-    } finally {
-      IOUtils.closeStream(containerFileStream);
-    }
-  }
-
-  /**
-   * Verify checksum of the container.
-   * @param containerId
-   * @param checksumFile
-   * @param checksum
-   * @throws StorageContainerException
-   */
-  public static void verifyCheckSum(long containerId, File checksumFile,
-      String checksum)
-      throws StorageContainerException {
-    try {
-      Preconditions.checkNotNull(checksum);
-      Preconditions.checkNotNull(checksumFile);
-      Path path = Paths.get(checksumFile.getAbsolutePath());
-      List<String> fileCheckSum = Files.readAllLines(path);
-      Preconditions.checkState(fileCheckSum.size() == 1, "checksum " +
-          "should be 32 byte string");
-      if (!checksum.equals(fileCheckSum.get(0))) {
-        LOG.error("Checksum mismatch for the container {}", containerId);
-        throw new StorageContainerException("Checksum mismatch for " +
-            "the container " + containerId, CHECKSUM_MISMATCH);
-      }
-    } catch (StorageContainerException ex) {
-      throw ex;
-    } catch (IOException ex) {
-      LOG.error("Error during verify checksum for container {}", containerId);
-      throw new StorageContainerException("Error during verify checksum" +
-          " for container " + containerId, IO_EXCEPTION);
-    }
-  }
-
   /**
    * Parse KeyValueContainerData and verify checksum.
-   * @param containerData
-   * @param containerFile
-   * @param checksumFile
-   * @param dbFile
+   * @param kvContainerData
    * @param config
    * @throws IOException
    */
-  public static void parseKeyValueContainerData(
-      KeyValueContainerData containerData, File containerFile, File
-      checksumFile, File dbFile, OzoneConfiguration config) throws IOException {
-
-    Preconditions.checkNotNull(containerData, "containerData cannot be null");
-    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
-    Preconditions.checkNotNull(checksumFile, "checksumFile cannot be null");
-    Preconditions.checkNotNull(dbFile, "dbFile cannot be null");
-    Preconditions.checkNotNull(config, "ozone config cannot be null");
-
-    long containerId = containerData.getContainerID();
-    String containerName = String.valueOf(containerId);
-    File metadataPath = new File(containerData.getMetadataPath());
-
-    Preconditions.checkNotNull(containerName, "container Name cannot be " +
-        "null");
-    Preconditions.checkNotNull(metadataPath, "metadata path cannot be " +
-        "null");
+  public static void parseKVContainerData(KeyValueContainerData kvContainerData,
+      OzoneConfiguration config) throws IOException {
+
+    long containerID = kvContainerData.getContainerID();
+    File metadataPath = new File(kvContainerData.getMetadataPath());

     // Verify Checksum
-    String checksum = KeyValueContainerUtil.computeCheckSum(
-        containerData.getContainerID(), containerFile);
-    KeyValueContainerUtil.verifyCheckSum(containerId, checksumFile, checksum);
-
-    containerData.setDbFile(dbFile);
+    ContainerUtils.verifyChecksum(kvContainerData);
+
+    File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
+        metadataPath, containerID);
+    if (!dbFile.exists()) {
+      LOG.error("Container DB file is missing for ContainerID {}. " +
+          "Skipping loading of this container.", containerID);
+      // Don't further process this container, as it is missing db file.
+      return;
+    }
+    kvContainerData.setDbFile(dbFile);

-    MetadataStore metadata = KeyUtils.getDB(containerData, config);
+    MetadataStore metadata = KeyUtils.getDB(kvContainerData, config);
     long bytesUsed = 0;
     List<Map.Entry<byte[], byte[]>> liveKeys = metadata
         .getRangeKVs(null, Integer.MAX_VALUE,
@@ -279,8 +189,8 @@ public final class KeyValueContainerUtil {
         return 0L;
       }
     }).sum();
-    containerData.setBytesUsed(bytesUsed);
-    containerData.setKeyCount(liveKeys.size());
+    kvContainerData.setBytesUsed(bytesUsed);
+    kvContainerData.setKeyCount(liveKeys.size());
   }

   /**

ContainerReader.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.ozone.container.ozoneimpl;

 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -38,7 +41,6 @@ import java.io.File;
 import java.io.FileFilter;
 import java.io.IOException;
-
 /**
  * Class used to read .container files from Volume and build container map.
  *
@@ -46,15 +48,19 @@ import java.io.IOException;
 *
 * ../hdds/VERSION
 * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/metadata/<<containerID>>.container
- * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/metadata/<<containerID>>.checksum
- * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/metadata/<<containerID>>.db
 * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/<<dataPath>>
 *
+ * Some ContainerTypes will have extra metadata other than the .container
+ * file. For example, KeyValueContainer will have a .db file. This .db file
+ * will also be stored in the metadata folder along with the .container file.
+ *
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID>/metadata/<<KVcontainerID>>.db
+ *
 * Note that the <<dataPath>> is dependent on the ContainerType.
 * For KeyValueContainers, the data is stored in a "chunks" folder. As such,
 * the <<dataPath>> layout for KeyValueContainers is
 *
- * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID>/chunks/<<chunksFile>>
+ * ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID>/chunks/<<chunksFile>>
 *
 */
 public class ContainerReader implements Runnable {
@@ -124,22 +130,19 @@ public class ContainerReader implements Runnable {
       for (File containerDir : containerDirs) {
         File metadataPath = new File(containerDir + File.separator +
             OzoneConsts.CONTAINER_META_PATH);
-        String containerName = containerDir.getName();
+        long containerID = Long.parseLong(containerDir.getName());
         if (metadataPath.exists()) {
           File containerFile = KeyValueContainerLocationUtil
-              .getContainerFile(metadataPath, containerName);
-          File checksumFile = KeyValueContainerLocationUtil
-              .getContainerCheckSumFile(metadataPath, containerName);
-          if (containerFile.exists() && checksumFile.exists()) {
-            verifyContainerFile(containerName, containerFile,
-                checksumFile);
+              .getContainerFile(metadataPath, containerID);
+          if (containerFile.exists()) {
+            verifyContainerFile(containerID, containerFile);
           } else {
-            LOG.error("Missing container metadata files for " +
-                "Container: {}", containerName);
+            LOG.error("Missing .container file for ContainerID: {}",
+                containerID);
           }
         } else {
           LOG.error("Missing container metadata directory for " +
-              "Container: {}", containerName);
+              "ContainerID: {}", containerID);
         }
       }
     }
@@ -149,39 +152,46 @@ public class ContainerReader implements Runnable {
     }
   }

-  private void verifyContainerFile(String containerName, File containerFile,
-      File checksumFile) {
+  private void verifyContainerFile(long containerID, File containerFile) {
     try {
       ContainerData containerData = ContainerDataYaml.readContainerFile(
           containerFile);
-
-      switch (containerData.getContainerType()) {
-      case KeyValueContainer:
-        KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
-            containerData;
-        containerData.setVolume(hddsVolume);
-        File dbFile = KeyValueContainerLocationUtil
-            .getContainerDBFile(new File(containerFile.getParent()),
-                containerName);
-        if (!dbFile.exists()) {
-          LOG.error("Container DB file is missing for Container {}, skipping " +
-              "this", containerName);
-          // Don't further process this container, as it is missing db file.
-          return;
-        }
-        KeyValueContainerUtil.parseKeyValueContainerData(keyValueContainerData,
-            containerFile, checksumFile, dbFile, config);
-        KeyValueContainer keyValueContainer = new KeyValueContainer(
-            keyValueContainerData, config);
-        containerSet.addContainer(keyValueContainer);
-        break;
-      default:
-        LOG.error("Unrecognized ContainerType {} format during verify " +
-            "ContainerFile", containerData.getContainerType());
+      if (containerID != containerData.getContainerID()) {
+        LOG.error("Invalid ContainerID in file {}. " +
+            "Skipping loading of this container.", containerFile);
+        return;
       }
+      verifyContainerData(containerData);
     } catch (IOException ex) {
-      LOG.error("Error during reading container file {}", containerFile);
+      LOG.error("Failed to parse ContainerFile for ContainerID: {}",
+          containerID, ex);
     }
   }
+
+  public void verifyContainerData(ContainerData containerData)
+      throws IOException {
+    switch (containerData.getContainerType()) {
+    case KeyValueContainer:
+      if (containerData instanceof KeyValueContainerData) {
+        KeyValueContainerData kvContainerData = (KeyValueContainerData)
+            containerData;
+        containerData.setVolume(hddsVolume);
+        KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
+        KeyValueContainer kvContainer = new KeyValueContainer(
+            kvContainerData, config);
+        containerSet.addContainer(kvContainer);
+      } else {
+        throw new StorageContainerException("Container File is corrupted. " +
+            "ContainerType is KeyValueContainer but cast to " +
+            "KeyValueContainerData failed. ",
+            ContainerProtos.Result.CONTAINER_METADATA_ERROR);
+      }
+      break;
+    default:
+      throw new StorageContainerException("Unrecognized ContainerType " +
          containerData.getContainerType(),
+          ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE);
+    }
+  }
 }
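Note: condensing the load path after this patch: for each container directory the reader derives the ID from the directory name, parses the .container file, cross-checks the embedded ID, and verifies the inline checksum before wiring up the DB file. A hedged sketch of that sequence using only methods visible in this diff; `containerDir` and `metadataPath` are assumed locals from the surrounding loop:

    // Sketch of the restart-time flow (error handling elided).
    long id = Long.parseLong(containerDir.getName());
    File containerFile = KeyValueContainerLocationUtil
        .getContainerFile(metadataPath, id);
    ContainerData data = ContainerDataYaml.readContainerFile(containerFile);
    if (id != data.getContainerID()) {
      return;                             // corrupt name/ID mapping: skip
    }
    // verifyContainerData() dispatches by type; for KeyValueContainer it
    // calls parseKVContainerData(), which runs ContainerUtils.verifyChecksum()
    // and then locates <id>.db next to the .container file.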

TestContainerDataYaml.java

@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.common.impl;
import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test; import org.junit.Test;
@ -37,39 +38,58 @@ import static org.junit.Assert.fail;
*/ */
public class TestContainerDataYaml { public class TestContainerDataYaml {
private static final int MAXSIZE = 5; private static long testContainerID = 1234;
@Test
public void testCreateContainerFile() throws IOException {
String path = new FileSystemTestHelper().getTestRootDir();
String containerPath = "1.container";
File filePath = new File(new FileSystemTestHelper().getTestRootDir()); private static String testRoot = new FileSystemTestHelper().getTestRootDir();
filePath.mkdirs();
private static final int MAXSIZE = 5;
/**
* Creates a .container file. cleanup() should be called at the end of the
* test when container file is created.
*/
private File createContainerFile(long containerID) throws IOException {
new File(testRoot).mkdirs();
String containerPath = containerID + ".container";
KeyValueContainerData keyValueContainerData = new KeyValueContainerData( KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
Long.MAX_VALUE, MAXSIZE); containerID, MAXSIZE);
keyValueContainerData.setContainerDBType("RocksDB"); keyValueContainerData.setContainerDBType("RocksDB");
keyValueContainerData.setMetadataPath(path); keyValueContainerData.setMetadataPath(testRoot);
keyValueContainerData.setChunksPath(path); keyValueContainerData.setChunksPath(testRoot);
File containerFile = new File(filePath, containerPath); File containerFile = new File(testRoot, containerPath);
// Create .container file with ContainerData // Create .container file with ContainerData
ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
.KeyValueContainer, containerFile, keyValueContainerData); .KeyValueContainer, keyValueContainerData, containerFile);
//Check .container file exists or not. //Check .container file exists or not.
assertTrue(containerFile.exists()); assertTrue(containerFile.exists());
return containerFile;
}
private void cleanup() {
FileUtil.fullyDelete(new File(testRoot));
}
@Test
public void testCreateContainerFile() throws IOException {
long containerID = testContainerID++;
File containerFile = createContainerFile(containerID);
// Read from .container file, and verify data. // Read from .container file, and verify data.
KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
.readContainerFile(containerFile); .readContainerFile(containerFile);
assertEquals(Long.MAX_VALUE, kvData.getContainerID()); assertEquals(containerID, kvData.getContainerID());
assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
.getContainerType()); .getContainerType());
assertEquals("RocksDB", kvData.getContainerDBType()); assertEquals("RocksDB", kvData.getContainerDBType());
assertEquals(path, kvData.getMetadataPath()); assertEquals(containerFile.getParent(), kvData.getMetadataPath());
assertEquals(path, kvData.getChunksPath()); assertEquals(containerFile.getParent(), kvData.getChunksPath());
assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
.getState()); .getState());
assertEquals(1, kvData.getLayOutVersion()); assertEquals(1, kvData.getLayOutVersion());
@ -82,22 +102,20 @@ public class TestContainerDataYaml {
kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED); kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
// Update .container file with new ContainerData.
containerFile = new File(filePath, containerPath);
     ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
-        .KeyValueContainer, containerFile, kvData);
+        .KeyValueContainer, kvData, containerFile);

     // Reading newly updated data from .container file
     kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile(
         containerFile);

     // verify data.
-    assertEquals(Long.MAX_VALUE, kvData.getContainerID());
+    assertEquals(containerID, kvData.getContainerID());
     assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
         .getContainerType());
     assertEquals("RocksDB", kvData.getContainerDBType());
-    assertEquals(path, kvData.getMetadataPath());
-    assertEquals(path, kvData.getChunksPath());
+    assertEquals(containerFile.getParent(), kvData.getMetadataPath());
+    assertEquals(containerFile.getParent(), kvData.getChunksPath());
     assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
         .getState());
     assertEquals(1, kvData.getLayOutVersion());
@@ -105,19 +123,15 @@ public class TestContainerDataYaml {
     assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
     assertEquals("ozone", kvData.getMetadata().get("OWNER"));
     assertEquals(MAXSIZE, kvData.getMaxSizeGB());
-    FileUtil.fullyDelete(filePath);
   }

   @Test
   public void testIncorrectContainerFile() throws IOException{
     try {
-      String path = "incorrect.container";
+      String containerFile = "incorrect.container";
       //Get file from resources folder
       ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(path).getFile());
+      File file = new File(classLoader.getResource(containerFile).getFile());
       KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
           .readContainerFile(file);
       fail("testIncorrectContainerFile failed");
@@ -137,12 +151,13 @@ public class TestContainerDataYaml {
     // created or not.
     try {
-      String path = "additionalfields.container";
+      String containerFile = "additionalfields.container";
       //Get file from resources folder
       ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(path).getFile());
+      File file = new File(classLoader.getResource(containerFile).getFile());
       KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
           .readContainerFile(file);
+      ContainerUtils.verifyChecksum(kvData);

       //Checking the Container file data is consistent or not
       assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
@@ -159,9 +174,45 @@ public class TestContainerDataYaml {
       assertEquals(2, kvData.getMetadata().size());
     } catch (Exception ex) {
+      ex.printStackTrace();
       fail("testCheckBackWardCompatabilityOfContainerFile failed");
     }
   }

+  /**
+   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}.
+   */
+  @Test
+  public void testChecksumInContainerFile() throws IOException {
+    long containerID = testContainerID++;
+    File containerFile = createContainerFile(containerID);
+
+    // Read from .container file, and verify data.
+    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+        .readContainerFile(containerFile);
+    ContainerUtils.verifyChecksum(kvData);
+
+    cleanup();
+  }
+
+  /**
+   * Test to verify incorrect checksum is detected.
+   */
+  @Test
+  public void testIncorrectChecksum() {
+    try {
+      String containerFile = "incorrect.checksum.container";
+      //Get file from resources folder
+      ClassLoader classLoader = getClass().getClassLoader();
+      File file = new File(classLoader.getResource(containerFile).getFile());
+      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+          .readContainerFile(file);
+      ContainerUtils.verifyChecksum(kvData);
+      fail("testIncorrectChecksum failed");
+    } catch (Exception ex) {
+      GenericTestUtils.assertExceptionContains("Container checksum error for " +
+          "ContainerID:", ex);
+    }
+  }
 }
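For context, the checksum scheme these new tests exercise can be pictured with a small, self-contained sketch. Going by the CHECKSUM constant and the commons-codec DigestUtils import added in this commit, the stored value appears to be a SHA-256 hex digest of the .container YAML computed with the checksum entry excluded; the exact serialization inside ContainerUtils.verifyChecksum may differ, and ChecksumSketch with its verify method is a hypothetical name, not committed code.

import org.apache.commons.codec.digest.DigestUtils;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public final class ChecksumSketch {

  // Recompute the digest over the file contents minus the checksum line and
  // compare it with the stored value (assumed scheme, see note above).
  public static boolean verify(String containerFilePath) throws IOException {
    String yaml = new String(Files.readAllBytes(Paths.get(containerFilePath)),
        StandardCharsets.UTF_8);
    String stored = null;
    StringBuilder body = new StringBuilder();
    for (String line : yaml.split("\n")) {
      if (line.startsWith("checksum: ")) {
        stored = line.substring("checksum: ".length()).trim();
      } else {
        body.append(line).append('\n');
      }
    }
    // A mismatch is the condition behind CONTAINER_CHECKSUM_ERROR, the
    // result code that testIncorrectChecksum asserts on.
    return stored != null
        && DigestUtils.sha256Hex(body.toString()).equals(stored);
  }
}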

File: TestKeyValueContainer.java

@@ -69,8 +69,7 @@ public class TestKeyValueContainer {
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
   private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private long containerId = 1L;
-  private String containerName = String.valueOf(containerId);
+  private long containerID = 1L;
   private KeyValueContainerData keyValueContainerData;
   private KeyValueContainer keyValueContainer;
@@ -111,16 +110,12 @@ public class TestKeyValueContainer {
     assertTrue(chunksPath != null);
     File containerMetaDataLoc = new File(containerMetaDataPath);

-    //Check whether container file, check sum file and container db file exists
-    // or not.
+    //Check whether container file and container db file exists or not.
     assertTrue(KeyValueContainerLocationUtil.getContainerFile(
-        containerMetaDataLoc, containerName).exists(), ".Container File does" +
+        containerMetaDataLoc, containerID).exists(), ".Container File does" +
         " not exist");
-    assertTrue(KeyValueContainerLocationUtil.getContainerCheckSumFile(
-        containerMetaDataLoc, containerName).exists(), "Container check sum " +
-        "File does" + " not exist");
     assertTrue(KeyValueContainerLocationUtil.getContainerDBFile(
-        containerMetaDataLoc, containerName).exists(), "Container DB does " +
+        containerMetaDataLoc, containerID).exists(), "Container DB does " +
         "not exist");
   }
@@ -172,10 +167,10 @@ public class TestKeyValueContainer {
     assertFalse("Container File still exists",
         KeyValueContainerLocationUtil.getContainerFile(containerMetaDataLoc,
-            containerName).exists());
+            containerID).exists());
     assertFalse("Container DB file still exists",
         KeyValueContainerLocationUtil.getContainerDBFile(containerMetaDataLoc,
-            containerName).exists());
+            containerID).exists());
   }
@@ -195,7 +190,7 @@ public class TestKeyValueContainer {
         .getMetadataPath();
     File containerMetaDataLoc = new File(containerMetaDataPath);
     File containerFile = KeyValueContainerLocationUtil.getContainerFile(
-        containerMetaDataLoc, containerName);
+        containerMetaDataLoc, containerID);
     keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
         .readContainerFile(containerFile);
@@ -236,7 +231,7 @@ public class TestKeyValueContainer {
         .getMetadataPath();
     File containerMetaDataLoc = new File(containerMetaDataPath);
     File containerFile = KeyValueContainerLocationUtil.getContainerFile(
-        containerMetaDataLoc, containerName);
+        containerMetaDataLoc, containerID);
     keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
         .readContainerFile(containerFile);
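The containerName-to-containerID migration in this test tracks a signature change in the location helper: with the .chksm companion file gone, the container file name can be derived from the numeric ID alone. A hedged reconstruction of what the updated helper plausibly looks like (the ".container" literal stands in for the project's extension constant, and the class name is invented for illustration):

import java.io.File;

final class ContainerFileSketch {
  // Plausible shape of the updated KeyValueContainerLocationUtil
  // .getContainerFile, which now takes the containerID as a long rather
  // than a pre-stringified container name.
  static File getContainerFile(File containerMetaDataPath, long containerID) {
    return new File(containerMetaDataPath, containerID + ".container");
  }
}

Call sites such as TestContainerPersistence below shed their String.valueOf(testContainerID) conversions accordingly.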

File: src/test/resources/additionalfields.container

@@ -8,4 +8,5 @@ layOutVersion: 1
 maxSizeGB: 5
 metadata: {OWNER: ozone, VOLUME: hdfs}
 state: CLOSED
 aclEnabled: true
+checksum: 1bbff32aeaa8fadc0b80c5c1e0597036e96acd8ae4bddbed188a2162762251a2

File: src/test/resources/incorrect.checksum.container (new)

@@ -0,0 +1,11 @@
+!<KeyValueContainerData>
+containerDBType: RocksDB
+chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+containerID: 9223372036854775807
+containerType: KeyValueContainer
+metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
+layOutVersion: 1
+maxSizeGB: 5
+metadata: {OWNER: ozone, VOLUME: hdfs}
+state: OPEN
+checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f
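To peek at a fixture like this outside the test harness, note that the leading !<KeyValueContainerData> tag normally requires the custom SnakeYAML constructor that ContainerDataYaml registers. One ad-hoc workaround, sketched here with the invented class FixturePeek, is to strip the tag and load the remaining fields as a plain map:

import org.yaml.snakeyaml.Yaml;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;

public final class FixturePeek {
  public static void main(String[] args) throws IOException {
    String text = new String(
        Files.readAllBytes(Paths.get("incorrect.checksum.container")),
        StandardCharsets.UTF_8);
    // The default loader cannot resolve the custom tag, so drop it first.
    Map<?, ?> fields = (Map<?, ?>) new Yaml()
        .load(text.replace("!<KeyValueContainerData>", ""));
    // In this fixture the stored digest deliberately fails to match the
    // content; testIncorrectChecksum relies on exactly that mismatch.
    System.out.println(fields.get("containerID") + " -> "
        + fields.get("checksum"));
  }
}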

File: src/test/resources/incorrect.container

@@ -7,4 +7,5 @@ metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 layOutVersion: 1
 maxSizeGB: 5
 metadata: {OWNER: ozone, VOLUME: hdfs}
 state: INVALID
+checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f

File: pom.xml

@@ -104,6 +104,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js.map</exclude>
         <exclude>src/test/resources/incorrect.container</exclude>
         <exclude>src/test/resources/additionalfields.container</exclude>
+        <exclude>src/test/resources/incorrect.checksum.container</exclude>
       </excludes>
     </configuration>
   </plugin>

File: TestContainerPersistence.java

@@ -716,7 +716,7 @@ public class TestContainerPersistence {
     File orgContainerFile = KeyValueContainerLocationUtil.getContainerFile(
         new File(container.getContainerData().getMetadataPath()),
-        String.valueOf(testContainerID));
+        testContainerID);
     Assert.assertTrue(orgContainerFile.exists());

     Map<String, String> newMetadata = Maps.newHashMap();
@@ -740,7 +740,7 @@ public class TestContainerPersistence {
     // Verify container data on disk
     File newContainerFile = KeyValueContainerLocationUtil.getContainerFile(
         new File(actualNewData.getMetadataPath()),
-        String.valueOf(testContainerID));
+        testContainerID);
     Assert.assertTrue("Container file should exist.",
         newContainerFile.exists());
     Assert.assertEquals("Container file should be in same location.",
@@ -780,8 +780,8 @@ public class TestContainerPersistence {
     // Update a non-existing container
     exception.expect(StorageContainerException.class);
-    exception.expectMessage("Container is an Inconsistent state, missing " +
-        "required files(.container, .chksm).");
+    exception.expectMessage("Container is an Inconsistent " +
+        "state, missing .container file.");
     Container nonExistentContainer = new KeyValueContainer(
         new KeyValueContainerData(RandomUtils.nextLong(),
             ContainerTestHelper.CONTAINER_MAX_SIZE_GB), conf);
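The reworded expectation reflects that, with the .chksm file removed, the datanode only has to find the .container file to consider a container consistent. A minimal sketch of such a guard, with an assumed class and method name and the message taken from the test above:

import java.io.File;
import java.io.IOException;

final class ContainerStateGuardSketch {
  // Assumed shape: an update must find the container file on disk.
  static void checkContainerFile(File containerFile) throws IOException {
    if (!containerFile.exists()) {
      throw new IOException(
          "Container is an Inconsistent state, missing .container file.");
    }
  }
}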

File: TestOzoneContainer.java

@@ -33,11 +33,14 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.junit.rules.Timeout;

 import java.util.*;
 import java.util.concurrent.CompletableFuture;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;

 /**
  * Tests ozone containers.
  */
@@ -48,6 +51,9 @@ public class TestOzoneContainer {
   @Rule
   public Timeout testTimeout = new Timeout(300000);

+  @Rule
+  public TemporaryFolder tempFolder = new TemporaryFolder();
+
   @Test
   public void testCreateOzoneContainer() throws Exception {
     long containerID = ContainerTestHelper.getTestContainerID();
@@ -60,6 +66,7 @@ public class TestOzoneContainer {
     // We don't start Ozone Container via data node, we will do it
     // independently in our test path.
     Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
+    conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
     conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
         .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
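The TemporaryFolder rule plus HDDS_DATANODE_DIR_KEY additions follow a standard JUnit pattern: point the datanode directory at a per-test temp dir so the .container files written during the run are cleaned up automatically. A minimal sketch of that scaffolding, assuming only the classes and key visible in the diff above (the class and method names are invented):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;

public class DatanodeDirRuleSketch {

  // JUnit creates the folder before each test and deletes it afterwards,
  // so container metadata from one run cannot leak into the next.
  @Rule
  public TemporaryFolder tempFolder = new TemporaryFolder();

  OzoneConfiguration configureDatanodeDir() {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
    return conf;
  }
}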