HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in C/C++. Contributed by Binglin Chang.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406011 13f79535-47bb-0310-9956-ffa450edef68
parent 7ee5ce3176
commit e0ce1b2475
@@ -157,11 +157,11 @@ public abstract class HdfsProtoUtil {
   }
 
   public static DataChecksum.Type fromProto(HdfsProtos.ChecksumTypeProto type) {
-    return DataChecksum.Type.valueOf(type.name());
+    return DataChecksum.Type.valueOf(type.getNumber());
   }
 
   public static HdfsProtos.ChecksumTypeProto toProto(DataChecksum.Type type) {
-    return HdfsProtos.ChecksumTypeProto.valueOf(type.name());
+    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
   }
 
   public static InputStream vintPrefixed(final InputStream input)
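The rename breaks any name-based bridge between the two enums: DataChecksum.Type keeps NULL/CRC32/CRC32C while ChecksumTypeProto now carries a CHECKSUM_ prefix, so the old valueOf(type.name()) calls would start throwing at runtime. A minimal sketch of the failure and the numeric-id fix (the enum constants and both valueOf overloads come from this diff; the wrapper class is illustrative only):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumEnumMappingSketch {
  public static void main(String[] args) {
    HdfsProtos.ChecksumTypeProto p = HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C;

    // Old approach: match by name. "CHECKSUM_CRC32C" has no counterpart in
    // DataChecksum.Type after the rename, so valueOf(String) throws.
    try {
      DataChecksum.Type.valueOf(p.name());
    } catch (IllegalArgumentException expected) {
      System.out.println("name-based lookup broke: " + expected.getMessage());
    }

    // New approach: match by integer id, which the two enums must keep in sync.
    DataChecksum.Type t = DataChecksum.Type.valueOf(p.getNumber());
    System.out.println("id-based lookup works: " + t); // CRC32C
  }
}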
@@ -52,7 +52,7 @@ public abstract class DataTransferProtoUtil {
   }
 
   public static ChecksumProto toProto(DataChecksum checksum) {
-    ChecksumTypeProto type = ChecksumTypeProto.valueOf(checksum.getChecksumType().name());
+    ChecksumTypeProto type = HdfsProtoUtil.toProto(checksum.getChecksumType());
     if (type == null) {
       throw new IllegalArgumentException(
           "Can't convert checksum to protobuf: " + checksum);
@@ -68,7 +68,7 @@ public abstract class DataTransferProtoUtil {
     if (proto == null) return null;
 
     int bytesPerChecksum = proto.getBytesPerChecksum();
-    DataChecksum.Type type = DataChecksum.Type.valueOf(proto.getType().name());
+    DataChecksum.Type type = HdfsProtoUtil.fromProto(proto.getType());
 
     return DataChecksum.newDataChecksum(type, bytesPerChecksum);
   }
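Taken together, these two hunks route all checksum (de)serialization through the shared HdfsProtoUtil helpers. A hedged round-trip sketch, assuming the usual Hadoop package layout for DataTransferProtoUtil and the generated ChecksumProto (neither import is shown in this diff):

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumRoundTripSketch {
  public static void main(String[] args) {
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    // toProto now delegates to HdfsProtoUtil, emitting CHECKSUM_CRC32C on the wire.
    ChecksumProto proto = DataTransferProtoUtil.toProto(sum);

    // fromProto recovers the Java enum by numeric id, not by name.
    DataChecksum back = DataTransferProtoUtil.fromProto(proto);
    assert back.getChecksumType() == DataChecksum.Type.CRC32C;
  }
}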
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
@@ -67,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -129,7 +129,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.protobuf.ByteString;
@@ -961,7 +960,7 @@ public class PBHelper {
         fs.getFileBufferSize(),
         fs.getEncryptDataTransfer(),
         fs.getTrashInterval(),
-        DataChecksum.Type.valueOf(fs.getChecksumType().name()));
+        HdfsProtoUtil.fromProto(fs.getChecksumType()));
   }
 
   public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -974,7 +973,7 @@ public class PBHelper {
         .setFileBufferSize(fs.getFileBufferSize())
         .setEncryptDataTransfer(fs.getEncryptDataTransfer())
         .setTrashInterval(fs.getTrashInterval())
-        .setChecksumType(ChecksumTypeProto.valueOf(fs.getChecksumType().name()))
+        .setChecksumType(HdfsProtoUtil.toProto(fs.getChecksumType()))
         .build();
   }
 
@@ -181,5 +181,5 @@ message OpBlockChecksumResponseProto {
   required uint32 bytesPerCrc = 1;
   required uint64 crcPerBlock = 2;
   required bytes md5 = 3;
-  optional ChecksumTypeProto crcType = 4 [default = CRC32];
+  optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32];
 }
@@ -181,11 +181,13 @@ message HdfsFileStatusProto {
 
 /**
  * Checksum algorithms/types used in HDFS
+ * Make sure this enum's integer values match enum values' id properties defined
+ * in org.apache.hadoop.util.DataChecksum.Type
  */
 enum ChecksumTypeProto {
-  NULL = 0;
-  CRC32 = 1;
-  CRC32C = 2;
+  CHECKSUM_NULL = 0;
+  CHECKSUM_CRC32 = 1;
+  CHECKSUM_CRC32C = 2;
 }
 
 /**
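The new comment states the contract that makes the numeric mapping safe: each proto number must equal the id property of the matching org.apache.hadoop.util.DataChecksum.Type constant (the prefix itself exists because a bare NULL collides with the C/C++ NULL macro in protoc-generated code, per the commit summary). A hedged sanity check one might keep in a unit test, using only names shown in this diff:

import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumEnumAlignmentCheck {
  public static void main(String[] args) {
    for (DataChecksum.Type t : DataChecksum.Type.values()) {
      HdfsProtos.ChecksumTypeProto p = HdfsProtoUtil.toProto(t);
      // Both directions must agree, and the proto number must equal the Java id.
      if (p.getNumber() != t.id || HdfsProtoUtil.fromProto(p) != t) {
        throw new AssertionError("enum drift for " + t);
      }
    }
    System.out.println("ChecksumTypeProto and DataChecksum.Type are id-aligned");
  }
}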
@@ -199,7 +201,7 @@ message FsServerDefaultsProto {
   required uint32 fileBufferSize = 5;
   optional bool encryptDataTransfer = 6 [default = false];
   optional uint64 trashInterval = 7 [default = 0];
-  optional ChecksumTypeProto checksumType = 8 [default = CRC32];
+  optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
 }
 
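Because the field keeps tag 8 and only the default constant is respelled, existing serialized messages are unaffected; the change only alters what an unset field reads back as. A hedged proto2 sketch of that behavior (the setters for required fields 1 through 4 are assumed from the rest of FsServerDefaultsProto, which this diff does not show; the values are placeholders):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;

public class ServerDefaultsChecksumSketch {
  public static void main(String[] args) {
    // Fill the required fields with placeholder values; checksumType (8) stays unset.
    FsServerDefaultsProto defaults = FsServerDefaultsProto.newBuilder()
        .setBlockSize(128L * 1024 * 1024)
        .setBytesPerChecksum(512)
        .setWritePacketSize(64 * 1024)
        .setReplication(3)
        .setFileBufferSize(4096)
        .build();

    // proto2 semantics: an unset optional field reports its declared default.
    assert !defaults.hasChecksumType();
    assert defaults.getChecksumType() == ChecksumTypeProto.CHECKSUM_CRC32;
  }
}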