HDDS-34. Remove .meta file during creation of container

Contributed by Bharat Viswanadham.
This commit is contained in:
Anu Engineer 2018-05-10 17:08:26 -07:00
parent db1ab0fc16
commit 30293f6065
6 changed files with 9 additions and 105 deletions

View File

@ -221,17 +221,12 @@ message ContainerData {
repeated KeyValue metadata = 2;
optional string dbPath = 3;
optional string containerPath = 4;
optional string hash = 5;
optional int64 bytesUsed = 6;
optional int64 size = 7;
optional int64 keyCount = 8;
optional ContainerLifeCycleState state = 9 [default = OPEN];
}
message ContainerMeta {
required string fileName = 1;
required string hash = 2;
}
// Container Messages.
message CreateContainerRequestProto {

View File

@ -18,14 +18,12 @@
package org.apache.hadoop.ozone.container.common.helpers;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.util.Time;
import java.io.IOException;
import java.util.Collections;
@ -45,7 +43,6 @@ public class ContainerData {
private String dbPath; // Path to Level DB Store.
// Path to Physical file system where container and checksum are stored.
private String containerFilePath;
private String hash;
private AtomicLong bytesUsed;
private long maxSize;
private long containerID;
@ -95,10 +92,6 @@ public static ContainerData getFromProtBuf(
data.setState(protoData.getState());
}
if(protoData.hasHash()) {
data.setHash(protoData.getHash());
}
if (protoData.hasBytesUsed()) {
data.setBytesUsed(protoData.getBytesUsed());
}
@ -123,10 +116,6 @@ public ContainerProtos.ContainerData getProtoBufMessage() {
builder.setDbPath(this.getDBPath());
}
if (this.getHash() != null) {
builder.setHash(this.getHash());
}
if (this.getContainerPath() != null) {
builder.setContainerPath(this.getContainerPath());
}
@ -274,22 +263,6 @@ public synchronized void closeContainer() {
// TODO: closed or closing here
setState(ContainerLifeCycleState.CLOSED);
// Some thing brain dead for now. name + Time stamp of when we get the close
// container message.
setHash(DigestUtils.sha256Hex(this.getContainerID() +
Long.toString(Time.monotonicNow())));
}
/**
* Final hash for this container.
* @return - Hash
*/
public String getHash() {
return hash;
}
public void setHash(String hash) {
this.hash = hash;
}
public void setMaxSize(long maxSize) {

View File

@ -47,7 +47,7 @@
import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
.UNABLE_TO_FIND_DATA_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
/**
* A set of helper functions to create proper responses.
@ -194,10 +194,9 @@ public static long getContainerIDFromFile(File containerFile) {
* Verifies that this is indeed a new container.
*
* @param containerFile - Container File to verify
* @param metadataFile - metadata File to verify
* @throws IOException
*/
public static void verifyIsNewContainer(File containerFile, File metadataFile)
public static void verifyIsNewContainer(File containerFile)
throws IOException {
Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
if (containerFile.exists()) {
@ -207,13 +206,6 @@ public static void verifyIsNewContainer(File containerFile, File metadataFile)
"disk.");
}
if (metadataFile.exists()) {
log.error("metadata found on disk, but missing container. Refusing to" +
" write this container. File: {} ", metadataFile.toPath());
throw new FileAlreadyExistsException(("metadata found on disk, but " +
"missing container. Refusing to write this container."));
}
File parentPath = new File(containerFile.getParent());
if (!parentPath.exists() && !parentPath.mkdirs()) {
@ -228,11 +220,6 @@ public static void verifyIsNewContainer(File containerFile, File metadataFile)
throw new IOException("creation of a new container file failed.");
}
if (!metadataFile.createNewFile()) {
log.error("creation of the metadata file failed. File: {}",
metadataFile.toPath());
throw new IOException("creation of a new container file failed.");
}
}
public static String getContainerDbFileName(String containerName) {
@ -286,20 +273,6 @@ public static Path createMetadata(Path containerPath, String containerName,
return metadataPath;
}
/**
* Returns Metadata location.
*
* @param containerData - Data
* @param location - Path
* @return Path
*/
public static File getMetadataFile(ContainerData containerData,
Path location) {
return location.resolve(Long.toString(containerData
.getContainerID()).concat(CONTAINER_META))
.toFile();
}
/**
* Returns container file location.
*
@ -395,10 +368,10 @@ public static void removeContainer(ContainerData containerData,
String rootPath = getContainerNameFromFile(new File(containerData
.getContainerPath()));
Path containerPath = Paths.get(rootPath.concat(CONTAINER_EXTENSION));
Path metaPath = Paths.get(rootPath.concat(CONTAINER_META));
FileUtils.forceDelete(containerPath.toFile());
FileUtils.forceDelete(metaPath.toFile());
}
/**

View File

@ -20,7 +20,6 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@ -101,7 +100,6 @@
import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
/**
* A Generic ContainerManagerImpl that will be called from Ozone
@ -233,18 +231,11 @@ private void readContainerInfo(String containerName)
long containerID = Long.parseLong(keyName);
try {
String containerFileName = containerName.concat(CONTAINER_EXTENSION);
String metaFileName = containerName.concat(CONTAINER_META);
containerStream = new FileInputStream(containerFileName);
metaStream = new FileInputStream(metaFileName);
MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
dis = new DigestInputStream(containerStream, sha);
ContainerProtos.ContainerData containerDataProto =
ContainerProtos.ContainerData.parseDelimitedFrom(dis);
ContainerProtos.ContainerData.parseDelimitedFrom(containerStream);
ContainerData containerData;
if (containerDataProto == null) {
// Sometimes container metadata might have been created but empty,
@ -255,19 +246,6 @@ private void readContainerInfo(String containerName)
return;
}
containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
ContainerProtos.ContainerMeta meta =
ContainerProtos.ContainerMeta.parseDelimitedFrom(metaStream);
if (meta != null && !DigestUtils.sha256Hex(sha.digest())
.equals(meta.getHash())) {
// This means we were not able read data from the disk when booted the
// datanode. We are going to rely on SCM understanding that we don't
// have valid data for this container when we send container reports.
// Hopefully SCM will ask us to delete this container and rebuild it.
LOG.error("Invalid SHA found for container data. Name :{}"
+ "cowardly refusing to read invalid data", containerName);
containerMap.put(containerID, new ContainerStatus(null));
return;
}
ContainerStatus containerStatus = new ContainerStatus(containerData);
// Initialize pending deletion blocks count in in-memory
@ -298,7 +276,7 @@ private void readContainerInfo(String containerName)
containerStatus.setBytesUsed(bytesUsed);
containerMap.put(containerID, containerStatus);
} catch (IOException | NoSuchAlgorithmException ex) {
} catch (IOException ex) {
LOG.error("read failed for file: {} ex: {}", containerName,
ex.getMessage());
@ -398,12 +376,10 @@ private void writeContainerInfo(ContainerData containerData,
File containerFile = ContainerUtils.getContainerFile(containerData,
location);
File metadataFile = ContainerUtils.getMetadataFile(containerData,
location);
String containerName = Long.toString(containerData.getContainerID());
if(!overwrite) {
ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
ContainerUtils.verifyIsNewContainer(containerFile);
metadataPath = this.locationManager.getDataPath(containerName);
metadataPath = ContainerUtils.createMetadata(metadataPath,
containerName, conf);
@ -412,7 +388,7 @@ private void writeContainerInfo(ContainerData containerData,
}
containerStream = new FileOutputStream(containerFile);
metaStream = new FileOutputStream(metadataFile);
MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
dos = new DigestOutputStream(containerStream, sha);
@ -425,13 +401,6 @@ private void writeContainerInfo(ContainerData containerData,
.getProtoBufMessage();
protoData.writeDelimitedTo(dos);
ContainerProtos.ContainerMeta protoMeta = ContainerProtos
.ContainerMeta.newBuilder()
.setFileName(containerFile.toString())
.setHash(DigestUtils.sha256Hex(sha.digest()))
.build();
protoMeta.writeDelimitedTo(metaStream);
} catch (IOException ex) {
// TODO : we need to clean up partially constructed files
// The proper way to do would be for a thread
@ -913,9 +882,6 @@ public ContainerReportsRequestProto getContainerReport() throws IOException {
.setWriteBytes(container.getWriteBytes())
.setContainerID(container.getContainer().getContainerID());
if (container.getContainer().getHash() != null) {
ciBuilder.setFinalhash(container.getContainer().getHash());
}
crBuilder.addReports(ciBuilder.build());
}

View File

@ -81,9 +81,6 @@ public void execute(CommandLine cmd) throws IOException {
containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
"CLOSED";
logOut("Container State: %s", openStatus);
if (!containerData.getHash().isEmpty()) {
logOut("Container Hash: %s", containerData.getHash());
}
logOut("Container DB Path: %s", containerData.getDbPath());
logOut("Container Path: %s", containerData.getContainerPath());

View File

@ -332,7 +332,7 @@ public void testInfoContainer() throws Exception {
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String
.format(formatStrWithHash, container.getContainerID(), openStatus,
data.getHash(), data.getDBPath(), data.getContainerPath(), "",
data.getDBPath(), data.getContainerPath(), "",
datanodeDetails.getHostName(), datanodeDetails.getHostName());
assertEquals(expected, out.toString());
}