HDDS-885. Fix test failures due to ChecksumData. Contributed by Hanisha Koneru.

Bharat Viswanadham, 2018-12-03 10:52:49 -08:00
parent fb10803dfa
commit ef3b03b75a
10 changed files with 68 additions and 15 deletions

ContainerProtocolCalls.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.hdds.scm.storage;
 import org.apache.hadoop.hdds.scm.XceiverClientAsyncReply;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .BlockNotCommittedException;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -305,10 +307,16 @@ public final class ContainerProtocolCalls {
     KeyValue keyValue =
         KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
             .build();
+    Checksum checksum = new Checksum();
+    ChecksumData checksumData = checksum.computeChecksum(data);
     ChunkInfo chunk =
-        ChunkInfo.newBuilder().setChunkName(blockID.getLocalID()
-            + "_chunk").setOffset(0).setLen(data.length).
-            addMetadata(keyValue).build();
+        ChunkInfo.newBuilder()
+            .setChunkName(blockID.getLocalID() + "_chunk")
+            .setOffset(0)
+            .setLen(data.length)
+            .addMetadata(keyValue)
+            .setChecksumData(checksumData.getProtoBufMessage())
+            .build();
     PutSmallFileRequestProto putSmallFileRequest =
         PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)

OzoneConfigKeys.java

@@ -342,8 +342,9 @@ public final class OzoneConfigKeys {
   public static final String OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT = "SHA256";
   public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM =
       "ozone.client.bytes.per.checksum";
-  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT =
-      1024 * 1024; // 1 MB
+  public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT = "1MB";
+  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES =
+      1024 * 1024;
   public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 256 * 1024;

   /**

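Because the default is now the human-readable string "1MB", callers must resolve it with Hadoop's StorageUnit-aware getter rather than getInt. A minimal sketch of that resolution, mirroring the RpcClient change later in this commit (conf.getStorageSize is the Hadoop 3.1+ Configuration API; the example class itself is hypothetical):

import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public class BytesPerChecksumExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Key unset: falls back to OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT
    // ("1MB"), which getStorageSize parses and converts to bytes.
    int bytesPerChecksum = (int) conf.getStorageSize(
        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM,
        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT,
        StorageUnit.BYTES);
    System.out.println(bytesPerChecksum); // prints 1048576
  }
}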
Checksum.java

@@ -24,7 +24,9 @@ import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ChecksumType;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -67,7 +69,7 @@ public class Checksum {
     this.checksumType = ChecksumType.valueOf(
         OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT);
     this.bytesPerChecksum = OzoneConfigKeys
-        .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT;
+        .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB
   }

   /**
@@ -236,4 +238,12 @@ public class Checksum {
     return checksumData.verifyChecksumDataMatches(computedChecksumData);
   }
+
+  /**
+   * Returns a ChecksumData with type NONE for testing.
+   */
+  @VisibleForTesting
+  public static ContainerProtos.ChecksumData getNoChecksumDataProto() {
+    return new ChecksumData(ChecksumType.NONE, 0).getProtoBufMessage();
+  }
 }

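Taken together, the Checksum API touched above is used roughly as follows. This is a hedged sketch based only on the calls visible in this diff; the example class, sample data, and the protobuf getType() accessor are assumptions, and exception handling is reduced to throws Exception:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;

public class ChecksumUsageExample {
  public static void main(String[] args) throws Exception {
    byte[] data = "hello chunk data".getBytes(StandardCharsets.UTF_8);

    Checksum checksum = new Checksum(); // defaults: SHA256, 1 MB per checksum
    ChecksumData checksumData = checksum.computeChecksum(data);

    // Serialize into the protobuf carried inside ChunkInfo:
    ContainerProtos.ChecksumData proto = checksumData.getProtoBufMessage();

    // Verify later by recomputing and comparing:
    ChecksumData recomputed = checksum.computeChecksum(data);
    boolean matches = checksumData.verifyChecksumDataMatches(recomputed);
    System.out.println(matches + " " + proto.getType());

    // Tests that do not care about checksums can use the NONE placeholder:
    ContainerProtos.ChecksumData none = Checksum.getNoChecksumDataProto();
    System.out.println(none.getType()); // NONE
  }
}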
ChunkInfo.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import java.io.IOException;
 import java.util.Map;
 import java.util.TreeMap;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.ChecksumData;

 /**
@@ -105,7 +106,13 @@ public class ChunkInfo {
     builder.setChunkName(this.getChunkName());
     builder.setOffset(this.getOffset());
     builder.setLen(this.getLen());
-    builder.setChecksumData(this.checksumData.getProtoBufMessage());
+    if (checksumData == null) {
+      // ChecksumData cannot be null when computing the protobuf message.
+      // Fall back to the NONE type (equivalent to no checksum).
+      builder.setChecksumData(Checksum.getNoChecksumDataProto());
+    } else {
+      builder.setChecksumData(this.checksumData.getProtoBufMessage());
+    }

     for (Map.Entry<String, String> entry : metadata.entrySet()) {
       ContainerProtos.KeyValue.Builder keyValBuilder =

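The effect of the new null check, as a hedged illustration; the three-argument ChunkInfo constructor and the protobuf accessors are assumed rather than shown in this diff:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

public class ChunkInfoFallbackExample {
  public static void main(String[] args) throws Exception {
    // checksumData is never set on this ChunkInfo (constructor assumed:
    // chunk name, offset, length).
    ChunkInfo info = new ChunkInfo("block_1_chunk_1", 0, 1024);
    // With this change the call below no longer fails; the resulting proto
    // carries a ChecksumData of type NONE.
    ContainerProtos.ChunkInfo proto = info.getProtoBufMessage();
    System.out.println(proto.getChecksumData().getType()); // NONE
  }
}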
ozone-default.xml

@@ -1397,5 +1397,23 @@
     </description>
   </property>
+  <property>
+    <name>ozone.client.checksum.type</name>
+    <value>SHA256</value>
+    <tag>OZONE, CLIENT, MANAGEMENT</tag>
+    <description>The checksum type [NONE / CRC32 / CRC32C / SHA256 / MD5]
+      determines which algorithm is used to compute the checksum for chunk
+      data. The default checksum type is SHA256.
+    </description>
+  </property>
+  <property>
+    <name>ozone.client.bytes.per.checksum</name>
+    <value>1MB</value>
+    <tag>OZONE, CLIENT, MANAGEMENT</tag>
+    <description>A checksum is computed for each block of this many bytes
+      of chunk data, and the checksums are stored sequentially.
+    </description>
+  </property>
 </configuration>

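For reference, a client could also override these new properties programmatically; a small sketch with illustrative values:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class ChecksumConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("ozone.client.checksum.type", "CRC32");
    // Storage-size strings are accepted; values below the 256 KB minimum
    // (OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) are raised by the client.
    conf.set("ozone.client.bytes.per.checksum", "512KB");
  }
}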
TestHddsDispatcher.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .WriteChunkRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
@@ -199,6 +200,7 @@ public class TestHddsDispatcher {
             + containerId + "_chunk_" + localId)
         .setOffset(0)
         .setLen(data.size())
+        .setChecksumData(Checksum.getNoChecksumDataProto())
         .build();
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto

RpcClient.java

@@ -171,9 +171,10 @@ public class RpcClient implements ClientProtocol {
         OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT,
         TimeUnit.MILLISECONDS);
-    int configuredChecksumSize = conf.getInt(
+    int configuredChecksumSize = (int) conf.getStorageSize(
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM,
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT);
+        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT,
+        StorageUnit.BYTES);
     int checksumSize;
     if(configuredChecksumSize <
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) {

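This hunk cuts off inside the minimum-size check (as does the identical DistributedStorageHandler hunk below). Presumably the clamp completes by flooring the configured value at the minimum; a sketch under that assumption, since the else branch is not shown in the diff:

    int checksumSize;
    if (configuredChecksumSize <
        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) {
      // Assumed continuation: floor the value at the 256 KB minimum.
      checksumSize = OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE;
    } else {
      checksumSize = configuredChecksumSize;
    }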
TestBlockDeletingService.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -140,8 +141,7 @@ public class TestBlockDeletingService {
             .setChunkName(chunk.getAbsolutePath())
             .setLen(0)
             .setOffset(0)
-            .setChecksumData(
-                ContainerProtos.ChecksumData.getDefaultInstance())
+            .setChecksumData(Checksum.getNoChecksumDataProto())
             .build();
         chunks.add(info);
       }

TestBlockData.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.helpers;

 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -41,7 +42,11 @@ public class TestBlockData {
   static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
       long len) {
     return ContainerProtos.ChunkInfo.newBuilder()
-        .setChunkName(name).setOffset(offset).setLen(len).build();
+        .setChunkName(name)
+        .setOffset(offset)
+        .setLen(len)
+        .setChecksumData(Checksum.getNoChecksumDataProto())
+        .build();
   }

   @Test

DistributedStorageHandler.java

@@ -132,9 +132,10 @@ public final class DistributedStorageHandler implements StorageHandler {
         OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT,
         TimeUnit.MILLISECONDS);
-    int configuredChecksumSize = conf.getInt(
+    int configuredChecksumSize = (int) conf.getStorageSize(
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM,
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT);
+        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT,
+        StorageUnit.BYTES);
     int checksumSize;
     if(configuredChecksumSize <
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) {