HADOOP-12029. Remove chunkSize from ECSchema as its not required for coders (Contributed by Vinayakumar B)
parent 80c56c2d31
commit 5a391e1d25
hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt:

@@ -56,3 +56,6 @@
     coder. (Kai Zheng via Zhe Zhang)
 
     HADOOP-12013. Generate fixed data to perform erasure coder test. (Kai Zheng)
+
+    HADOOP-12029. Remove chunkSize from ECSchema as its not required for coders
+    (vinayakumarb)
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java:

@@ -28,8 +28,6 @@ public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
-  public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   /**
    * A friendly and understandable name that can mean what's it, also serves as
@@ -52,11 +50,6 @@ public final class ECSchema {
    */
   private final int numParityUnits;
 
-  /**
-   * Unit data size for each chunk in a coding
-   */
-  private final int chunkSize;
-
   /*
    * An erasure code can have its own specific advanced parameters, subject to
    * itself to interpret these key-value settings.
@@ -92,17 +85,9 @@ public ECSchema(String schemaName, Map<String, String> allOptions) {
     this.numDataUnits = tmpNumDataUnits;
     this.numParityUnits = tmpNumParityUnits;
 
-    int tmpChunkSize = extractIntOption(CHUNK_SIZE_KEY, allOptions);
-    if (tmpChunkSize > 0) {
-      this.chunkSize = tmpChunkSize;
-    } else {
-      this.chunkSize = DEFAULT_CHUNK_SIZE;
-    }
-
     allOptions.remove(CODEC_NAME_KEY);
     allOptions.remove(NUM_DATA_UNITS_KEY);
     allOptions.remove(NUM_PARITY_UNITS_KEY);
-    allOptions.remove(CHUNK_SIZE_KEY);
     // After some cleanup
     this.extraOptions = Collections.unmodifiableMap(allOptions);
   }
@@ -144,14 +129,6 @@ public ECSchema(String schemaName, String codecName, int numDataUnits,
       extraOptions = new HashMap<>();
     }
 
-    int tmpChunkSize = extractIntOption(CHUNK_SIZE_KEY, extraOptions);
-    if (tmpChunkSize > 0) {
-      this.chunkSize = tmpChunkSize;
-    } else {
-      this.chunkSize = DEFAULT_CHUNK_SIZE;
-    }
-
-    extraOptions.remove(CHUNK_SIZE_KEY);
     // After some cleanup
     this.extraOptions = Collections.unmodifiableMap(extraOptions);
   }
@@ -216,14 +193,6 @@ public int getNumParityUnits() {
     return numParityUnits;
   }
 
-  /**
-   * Get chunk buffer size for the erasure encoding/decoding.
-   * @return chunk buffer size
-   */
-  public int getChunkSize() {
-    return chunkSize;
-  }
-
   /**
    * Make a meaningful string representation for log output.
    * @return string representation
@@ -235,9 +204,8 @@ public String toString() {
     sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
-    sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits + ", ");
-    sb.append(CHUNK_SIZE_KEY + "=" + chunkSize +
-        (extraOptions.isEmpty() ? "" : ", "));
+    sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
+    sb.append((extraOptions.isEmpty() ? "" : ", "));
 
     int i = 0;
     for (String opt : extraOptions.keySet()) {
@@ -267,9 +235,6 @@ public boolean equals(Object o) {
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
    }
-    if (chunkSize != ecSchema.chunkSize) {
-      return false;
-    }
     if (!schemaName.equals(ecSchema.schemaName)) {
       return false;
     }
@@ -286,7 +251,6 @@ public int hashCode() {
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;
-    result = 31 * result + chunkSize;
 
     return result;
   }
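Note on a subtle behavior change visible in the map-based constructor above: with the CHUNK_SIZE_KEY parsing and the allOptions.remove(CHUNK_SIZE_KEY) call gone, a "chunkSize" entry passed in the options map is no longer consumed, so it now lands in getExtraOptions(). A minimal sketch of post-change construction, using only the ECSchema API shown in this diff (the demo class name and values are illustrative, not from the commit):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class ECSchemaDemo {
  public static void main(String[] args) {
    Map<String, String> options = new HashMap<>();
    options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");    // k
    options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");  // m
    options.put(ECSchema.CODEC_NAME_KEY, "rs");
    // Pre-change this key was parsed (defaulting to 256K) and stripped
    // from the map; post-change it is just an ordinary extra option.
    options.put("chunkSize", "65536");

    ECSchema schema = new ECSchema("demoSchema", options);
    System.out.println(schema.getNumDataUnits());    // 6
    System.out.println(schema.getNumParityUnits());  // 3
    // "chunkSize" now survives into the extra options map.
    System.out.println(schema.getExtraOptions().get("chunkSize")); // 65536
  }
}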
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java:

@@ -29,7 +29,6 @@ public void testGoodSchema() {
     String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
-    int chunkSize = 64 * 1024 * 1024;
     String codec = "rs";
     String extraOption = "extraOption";
     String extraOptionValue = "extraOptionValue";
@@ -38,7 +37,6 @@ public void testGoodSchema() {
     options.put(ECSchema.NUM_DATA_UNITS_KEY, String.valueOf(numDataUnits));
     options.put(ECSchema.NUM_PARITY_UNITS_KEY, String.valueOf(numParityUnits));
     options.put(ECSchema.CODEC_NAME_KEY, codec);
-    options.put(ECSchema.CHUNK_SIZE_KEY, String.valueOf(chunkSize));
     options.put(extraOption, extraOptionValue);
 
     ECSchema schema = new ECSchema(schemaName, options);
@@ -47,7 +45,6 @@ public void testGoodSchema() {
     assertEquals(schemaName, schema.getSchemaName());
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
-    assertEquals(chunkSize, schema.getChunkSize());
     assertEquals(codec, schema.getCodecName());
     assertEquals(extraOptionValue, schema.getExtraOptions().get(extraOption));
   }
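For completeness, a rough sketch of what toString() renders after this change, continuing the demo above and based only on the appends visible in the toString() hunk (any wrapper text built outside the hunk, and the exact formatting of the extra-options loop, are not shown in the diff and are assumed away here):

Map<String, String> options = new HashMap<>();
options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");
options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");
options.put(ECSchema.CODEC_NAME_KEY, "rs");

ECSchema schema = new ECSchema("goodSchema", options);
// With no extra options, the appends in the hunk produce the fragment:
//   Name=goodSchema, Codec=rs, k=6, m=3
// with no chunkSize component any more.
System.out.println(schema);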