HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in hadoop-hdfs. Contributed by Walter Su.
This commit is contained in: parent fbf7e81ca0, commit 1d37a88121
@@ -29,12 +29,6 @@ public final class ECSchema {
   public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
   public static final String CODEC_NAME_KEY = "codec";
 
-  /**
-   * A friendly and understandable name that can mean what's it, also serves as
-   * the identifier that distinguish it from other schemas.
-   */
-  private final String schemaName;
-
   /**
    * The erasure codec name associated.
    */
@@ -59,14 +53,9 @@ public final class ECSchema {
   /**
    * Constructor with schema name and provided all options. Note the options may
    * contain additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param allOptions all schema options
    */
-  public ECSchema(String schemaName, Map<String, String> allOptions) {
-    assert (schemaName != null && ! schemaName.isEmpty());
-
-    this.schemaName = schemaName;
-
+  public ECSchema(Map<String, String> allOptions) {
     if (allOptions == null || allOptions.isEmpty()) {
       throw new IllegalArgumentException("No schema options are provided");
     }
@@ -94,33 +83,27 @@ public final class ECSchema {
 
   /**
    * Constructor with key parameters provided.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
    * @param numParityUnits number of parity units used in the schema
    */
-  public ECSchema(String schemaName, String codecName,
-      int numDataUnits, int numParityUnits) {
-    this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits) {
+    this(codecName, numDataUnits, numParityUnits, null);
   }
 
   /**
    * Constructor with key parameters provided. Note the extraOptions may contain
    * additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
    * @param numParityUnits number of parity units used in the schema
    * @param extraOptions extra options to configure the codec
    */
-  public ECSchema(String schemaName, String codecName, int numDataUnits,
-      int numParityUnits, Map<String, String> extraOptions) {
-
-    assert (schemaName != null && ! schemaName.isEmpty());
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits,
+      Map<String, String> extraOptions) {
     assert (codecName != null && ! codecName.isEmpty());
     assert (numDataUnits > 0 && numParityUnits > 0);
 
-    this.schemaName = schemaName;
     this.codecName = codecName;
     this.numDataUnits = numDataUnits;
     this.numParityUnits = numParityUnits;
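With the name parameter gone, a schema is identified purely by its codec and unit counts. A minimal sketch of the new constructors in use (the codec string and counts are illustrative):

    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class EcSchemaSketch {
      public static void main(String[] args) {
        // The three-arg form delegates to the four-arg form with null extraOptions.
        ECSchema rs = new ECSchema("rs", 6, 3);
        // toString() now omits any schema name, printing something like:
        // ECSchema=[Codec=rs, numDataUnits=6, numParityUnits=3]
        System.out.println(rs);
      }
    }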
@@ -153,14 +136,6 @@ public final class ECSchema {
     return result;
   }
 
-  /**
-   * Get the schema name
-   * @return schema name
-   */
-  public String getSchemaName() {
-    return schemaName;
-  }
-
   /**
    * Get the codec name
    * @return codec name
@@ -201,7 +176,6 @@ public final class ECSchema {
   public String toString() {
     StringBuilder sb = new StringBuilder("ECSchema=[");
 
-    sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
     sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
@@ -235,9 +209,6 @@ public final class ECSchema {
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
     }
-    if (!schemaName.equals(ecSchema.schemaName)) {
-      return false;
-    }
     if (!codecName.equals(ecSchema.codecName)) {
       return false;
     }
@@ -246,8 +217,7 @@ public final class ECSchema {
 
   @Override
   public int hashCode() {
-    int result = schemaName.hashCode();
-    result = 31 * result + codecName.hashCode();
+    int result = codecName.hashCode();
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-/**
- * A EC schema loading utility that loads predefined EC schemas from XML file
- */
-public class SchemaLoader {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      SchemaLoader.class.getName());
-
-  /**
-   * Load predefined ec schemas from configuration file. This file is
-   * expected to be in the XML format.
-   */
-  public List<ECSchema> loadSchema(String schemaFilePath) {
-    File confFile = getSchemaFile(schemaFilePath);
-    if (confFile == null) {
-      LOG.warn("Not found any predefined EC schema file");
-      return Collections.emptyList();
-    }
-
-    try {
-      return loadSchema(confFile);
-    } catch (ParserConfigurationException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (IOException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (SAXException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    }
-  }
-
-  private List<ECSchema> loadSchema(File schemaFile)
-      throws ParserConfigurationException, IOException, SAXException {
-
-    LOG.info("Loading predefined EC schema file {}", schemaFile);
-
-    // Read and parse the schema file.
-    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-    dbf.setIgnoringComments(true);
-    DocumentBuilder builder = dbf.newDocumentBuilder();
-    Document doc = builder.parse(schemaFile);
-    Element root = doc.getDocumentElement();
-
-    if (!"schemas".equals(root.getTagName())) {
-      throw new RuntimeException("Bad EC schema config file: " +
-          "top-level element not <schemas>");
-    }
-
-    NodeList elements = root.getChildNodes();
-    List<ECSchema> schemas = new ArrayList<ECSchema>();
-    for (int i = 0; i < elements.getLength(); i++) {
-      Node node = elements.item(i);
-      if (node instanceof Element) {
-        Element element = (Element) node;
-        if ("schema".equals(element.getTagName())) {
-          ECSchema schema = loadSchema(element);
-          schemas.add(schema);
-        } else {
-          LOG.warn("Bad element in EC schema configuration file: {}",
-              element.getTagName());
-        }
-      }
-    }
-
-    return schemas;
-  }
-
-  /**
-   * Path to the XML file containing predefined ec schemas. If the path is
-   * relative, it is searched for in the classpath.
-   */
-  private File getSchemaFile(String schemaFilePath) {
-    File schemaFile = new File(schemaFilePath);
-    if (! schemaFile.isAbsolute()) {
-      URL url = Thread.currentThread().getContextClassLoader()
-          .getResource(schemaFilePath);
-      if (url == null) {
-        LOG.warn("{} not found on the classpath.", schemaFilePath);
-        schemaFile = null;
-      } else if (! url.getProtocol().equalsIgnoreCase("file")) {
-        throw new RuntimeException(
-            "EC predefined schema file " + url +
-            " found on the classpath is not on the local filesystem.");
-      } else {
-        schemaFile = new File(url.getPath());
-      }
-    }
-
-    return schemaFile;
-  }
-
-  /**
-   * Loads a schema from a schema element in the configuration file
-   */
-  private ECSchema loadSchema(Element element) {
-    String schemaName = element.getAttribute("name");
-    Map<String, String> ecOptions = new HashMap<String, String>();
-    NodeList fields = element.getChildNodes();
-
-    for (int i = 0; i < fields.getLength(); i++) {
-      Node fieldNode = fields.item(i);
-      if (fieldNode instanceof Element) {
-        Element field = (Element) fieldNode;
-        String tagName = field.getTagName();
-        String value = ((Text) field.getFirstChild()).getData().trim();
-        ecOptions.put(tagName, value);
-      }
-    }
-
-    ECSchema schema = new ECSchema(schemaName, ecOptions);
-    return schema;
-  }
-}
@@ -26,7 +26,6 @@ public class TestECSchema {
 
   @Test
   public void testGoodSchema() {
-    String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
     String codec = "rs";
@@ -39,10 +38,9 @@ public class TestECSchema {
     options.put(ECSchema.CODEC_NAME_KEY, codec);
     options.put(extraOption, extraOptionValue);
 
-    ECSchema schema = new ECSchema(schemaName, options);
+    ECSchema schema = new ECSchema(options);
     System.out.println(schema.toString());
 
-    assertEquals(schemaName, schema.getSchemaName());
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
     assertEquals(codec, schema.getCodecName());
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.PrintWriter;
-import java.util.List;
-
-import org.junit.Test;
-
-public class TestSchemaLoader {
-
-  final static String TEST_DIR = new File(System.getProperty(
-      "test.build.data", "/tmp")).getAbsolutePath();
-
-  final static String SCHEMA_FILE = new File(TEST_DIR, "test-ecschema")
-      .getAbsolutePath();
-
-  @Test
-  public void testLoadSchema() throws Exception {
-    PrintWriter out = new PrintWriter(new FileWriter(SCHEMA_FILE));
-    out.println("<?xml version=\"1.0\"?>");
-    out.println("<schemas>");
-    out.println("  <schema name=\"RSk6m3\">");
-    out.println("    <numDataUnits>6</numDataUnits>");
-    out.println("    <numParityUnits>3</numParityUnits>");
-    out.println("    <codec>RS</codec>");
-    out.println("  </schema>");
-    out.println("  <schema name=\"RSk10m4\">");
-    out.println("    <numDataUnits>10</numDataUnits>");
-    out.println("    <numParityUnits>4</numParityUnits>");
-    out.println("    <codec>RS</codec>");
-    out.println("  </schema>");
-    out.println("</schemas>");
-    out.close();
-
-    SchemaLoader schemaLoader = new SchemaLoader();
-    List<ECSchema> schemas = schemaLoader.loadSchema(SCHEMA_FILE);
-
-    assertEquals(2, schemas.size());
-
-    ECSchema schema1 = schemas.get(0);
-    assertEquals("RSk6m3", schema1.getSchemaName());
-    assertEquals(0, schema1.getExtraOptions().size());
-    assertEquals(6, schema1.getNumDataUnits());
-    assertEquals(3, schema1.getNumParityUnits());
-    assertEquals("RS", schema1.getCodecName());
-
-    ECSchema schema2 = schemas.get(1);
-    assertEquals("RSk10m4", schema2.getSchemaName());
-    assertEquals(0, schema2.getExtraOptions().size());
-    assertEquals(10, schema2.getNumDataUnits());
-    assertEquals(4, schema2.getNumParityUnits());
-    assertEquals("RS", schema2.getCodecName());
-  }
-}
@@ -183,8 +183,8 @@ public interface HdfsClientConfigKeys {
 
     String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
     /**
-     * With default 6+3 schema, each normal read could span 6 DNs. So this
-     * default value accommodates 3 read streams
+     * With default RS-6-3-64k erasure coding policy, each normal read could span
+     * 6 DNs, so this default value accommodates 3 read streams
      */
     int     THREADPOOL_SIZE_DEFAULT = 18;
   }
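The default of 18 follows directly from the arithmetic in that comment; a hedged sketch (the variable names are illustrative, not HDFS constants):

    public class ThreadpoolSizing {
      public static void main(String[] args) {
        int dnsPerRead = 6;       // RS-6-3-64k: a normal read spans 6 DataNodes
        int concurrentReads = 3;  // the default aims to keep 3 reads in flight
        // 18, matching THREADPOOL_SIZE_DEFAULT above
        System.out.println(dnsPerRead * concurrentReads);
      }
    }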
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+
+/**
+ * A policy about how to write/read/code an erasure coding file.
+ */
+public final class ErasureCodingPolicy {
+
+  private final String name;
+  private final ECSchema schema;
+  private final int cellSize;
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize){
+    this.name = name;
+    this.schema = schema;
+    this.cellSize = cellSize;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public ECSchema getSchema() {
+    return schema;
+  }
+
+  public int getCellSize() {
+    return cellSize;
+  }
+
+  public int getNumDataUnits() {
+    return schema.getNumDataUnits();
+  }
+
+  public int getNumParityUnits() {
+    return schema.getNumParityUnits();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ErasureCodingPolicy that = (ErasureCodingPolicy) o;
+
+    if (that.getName().equals(name) && that.getCellSize() == cellSize
+        && that.getSchema().equals(schema)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = name.hashCode();
+    result = 31 * result + schema.hashCode();
+    result = 31 * result + cellSize;
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ErasureCodingPolicy=[");
+    sb.append("Name=" + name + ", ");
+    sb.append("Schema=[" + schema.toString() + "], ");
+    sb.append("CellSize=" + cellSize + " ");
+    sb.append("]");
+    return sb.toString();
+  }
+}
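The new class bundles what previously traveled as separate schema and cellSize arguments. A minimal sketch of composing one (the RS-6-3-64k name and values are illustrative, borrowed from the default policy mentioned elsewhere in this commit):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class EcPolicySketch {
      public static void main(String[] args) {
        ECSchema rsSchema = new ECSchema("rs", 6, 3);
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy("RS-6-3-64k", rsSchema, 64 * 1024);
        // The unit-count getters simply delegate to the wrapped schema.
        System.out.println(policy.getNumDataUnits());   // 6
        System.out.println(policy.getNumParityUnits()); // 3
        System.out.println(policy.getCellSize());       // 65536
      }
    }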
@@ -77,8 +77,8 @@ public final class HdfsConstants {
 
   /*
    * These values correspond to the values used by the system default erasure
-   * coding schema.
-   * TODO: to be removed once all places use schema.
+   * coding policy.
+   * TODO: get these values from ec policy of the associated INodeFile
    */
 
   public static final byte NUM_DATA_BLOCKS = 6;
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /** Interface that represents the over the wire information for a file.
  */
@@ -49,8 +48,7 @@ public class HdfsFileStatus {
 
   private final FileEncryptionInfo feInfo;
 
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
 
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
@@ -77,7 +75,7 @@ public class HdfsFileStatus {
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] symlink,
       byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ECSchema ecSchema, int stripeCellSize) {
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -97,8 +95,7 @@ public class HdfsFileStatus {
     this.childrenNum = childrenNum;
     this.feInfo = feInfo;
     this.storagePolicy = storagePolicy;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -256,12 +253,8 @@ public class HdfsFileStatus {
     return feInfo;
   }
 
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   public final int getChildrenNum() {
@@ -24,7 +24,6 @@ import java.util.Comparator;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Collection of blocks with their locations and the file length.
@@ -38,8 +37,7 @@ public class LocatedBlocks {
   private final LocatedBlock lastLocatedBlock;
   private final boolean isLastBlockComplete;
   private final FileEncryptionInfo fileEncryptionInfo;
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
 
   public LocatedBlocks() {
     fileLength = 0;
@@ -48,22 +46,20 @@ public class LocatedBlocks {
     lastLocatedBlock = null;
     isLastBlockComplete = false;
     fileEncryptionInfo = null;
-    ecSchema = null;
-    stripeCellSize = 0;
+    ecPolicy = null;
   }
 
   public LocatedBlocks(long flength, boolean isUnderConstuction,
       List<LocatedBlock> blks, LocatedBlock lastBlock,
       boolean isLastBlockCompleted, FileEncryptionInfo feInfo,
-      ECSchema ecSchema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     fileLength = flength;
     blocks = blks;
     underConstruction = isUnderConstuction;
     this.lastLocatedBlock = lastBlock;
     this.isLastBlockComplete = isLastBlockCompleted;
     this.fileEncryptionInfo = feInfo;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -120,17 +116,10 @@ public class LocatedBlocks {
   }
 
   /**
-   * @return The ECSchema for ErasureCoded file, null otherwise.
+   * @return The ECPolicy for ErasureCoded file, null otherwise.
    */
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  /**
-   * @return Stripe Cell size for ErasureCoded file, 0 otherwise.
-   */
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   /**
@@ -61,7 +61,7 @@ public class SnapshottableDirectoryStatus {
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
@@ -132,7 +132,7 @@ class JsonUtilClient {
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtilClient.string2Bytes(localName),
         fileId, childrenNum, null,
-        storagePolicy, null, 0);
+        storagePolicy, null);
   }
 
   /** Convert a Json map to an ExtendedBlock object. */
@@ -479,7 +479,7 @@ class JsonUtilClient {
         (Map<?, ?>) m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete, null, null, 0);
+        lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
 }
@@ -394,3 +394,6 @@
 
     HDFS-8827. Erasure Coding: Fix NPE when NameNode processes over-replicated
     striped blocks. (Walter Su and Takuya Fukudome via jing9)
+
+    HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in
+    hadoop-hdfs. (Walter Su via zhz)
@@ -165,7 +165,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -1194,10 +1194,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try {
       LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
       if (locatedBlocks != null) {
-        ECSchema schema = locatedBlocks.getECSchema();
-        if (schema != null) {
-          return new DFSStripedInputStream(this, src, verifyChecksum, schema,
-              locatedBlocks.getStripeCellSize(), locatedBlocks);
+        ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+        if (ecPolicy != null) {
+          return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
+              locatedBlocks);
         }
         return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
       } else {
@@ -3011,12 +3011,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkOpen();
     TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
     try {
-      namenode.createErasureCodingZone(src, schema, cellSize);
+      namenode.createErasureCodingZone(src, ecPolicy);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           SafeModeException.class,
@@ -3138,11 +3138,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
+    TraceScope scope = Trace.startSpan("getErasureCodingPolicies", traceSampler);
     try {
-      return namenode.getECSchemas();
+      return namenode.getErasureCodingPolicies();
     } finally {
       scope.close();
     }
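A hedged sketch of the renamed client call (assumes an already-open DFSClient; the printing helper is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class ListEcPolicies {
      // Hypothetical helper: enumerate the policies the NameNode advertises.
      static void print(DFSClient client) throws IOException {
        for (ErasureCodingPolicy p : client.getErasureCodingPolicies()) {
          System.out.println(p.getName() + " cellSize=" + p.getCellSize());
        }
      }
    }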
@@ -680,12 +680,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT =
       false;
 
-  public static final String DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE =
-      "dfs.client.striped.read.threadpool.size";
-  // With default 3+2 schema, each normal read could span 3 DNs. So this
-  // default value accommodates 6 read streams
-  public static final int DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE = 18;
-
   // Slow io warning log threshold settings for dfsclient and datanode.
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.datanode.slow.io.warning.threshold.ms";
@@ -271,7 +271,7 @@ public class DFSOutputStream extends FSOutputSummer
     }
     Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
     final DFSOutputStream out;
-    if(stat.getECSchema() != null) {
+    if(stat.getErasureCodingPolicy() != null) {
       out = new DFSStripedOutputStream(dfsClient, src, stat,
           flag, progress, checksum, favoredNodes);
     } else {
@@ -36,7 +36,7 @@ import static org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResult;
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.util.DirectBufferPool;
@@ -147,7 +147,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   /** the buffer for a complete stripe */
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
-  private final ECSchema schema;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureDecoder decoder;
 
   /**
@@ -158,15 +158,15 @@ public class DFSStripedInputStream extends DFSInputStream {
   private final CompletionService<Void> readingService;
 
   DFSStripedInputStream(DFSClient dfsClient, String src,
-      boolean verifyChecksum, ECSchema schema, int cellSize,
+      boolean verifyChecksum, ErasureCodingPolicy ecPolicy,
       LocatedBlocks locatedBlocks) throws IOException {
     super(dfsClient, src, verifyChecksum, locatedBlocks);
 
-    assert schema != null;
-    this.schema = schema;
-    this.cellSize = cellSize;
-    dataBlkNum = (short) schema.getNumDataUnits();
-    parityBlkNum = (short) schema.getNumParityUnits();
+    assert ecPolicy != null;
+    this.ecPolicy = ecPolicy;
+    this.cellSize = ecPolicy.getCellSize();
+    dataBlkNum = (short) ecPolicy.getNumDataUnits();
+    parityBlkNum = (short) ecPolicy.getNumParityUnits();
     groupSize = dataBlkNum + parityBlkNum;
     blockReaders = new BlockReaderInfo[groupSize];
     curStripeRange = new StripeRange(0, 0);
@@ -282,7 +282,7 @@ public class DFSStripedInputStream extends DFSInputStream {
         stripeLimit - stripeBufOffset);
 
     LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
-    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(schema, cellSize,
+    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
         blockGroup, offsetInBlockGroup,
         offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@@ -510,7 +510,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
 
     AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
-        schema, cellSize, blockGroup, start, end, buf, offset);
+        ecPolicy, cellSize, blockGroup, start, end, buf, offset);
     CompletionService<Void> readService = new ExecutorCompletionService<>(
         dfsClient.getStripedReadsThreadPool());
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -276,10 +276,10 @@ public class DFSStripedOutputStream extends DFSOutputStream {
       LOG.debug("Creating DFSStripedOutputStream for " + src);
     }
 
-    final ECSchema schema = stat.getECSchema();
-    final int numParityBlocks = schema.getNumParityUnits();
-    cellSize = stat.getStripeCellSize();
-    numDataBlocks = schema.getNumDataUnits();
+    final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+    final int numParityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    numDataBlocks = ecPolicy.getNumDataUnits();
     numAllBlocks = numDataBlocks + numParityBlocks;
 
     encoder = CodecUtil.createRSRawEncoder(dfsClient.getConfiguration(),
@@ -90,7 +90,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2280,18 +2280,17 @@ public class DistributedFileSystem extends FileSystem {
    * Create the erasurecoding zone
    *
    * @param path Directory to create the ec zone
-   * @param schema ECSchema for the zone. If not specified default will be used.
-   * @param cellSize Cellsize for the striped erasure coding
+   * @param ecPolicy erasure coding policy for the zone. If not specified default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
+  public void createErasureCodingZone(final Path path, final ErasureCodingPolicy ecPolicy)
+      throws IOException {
     Path absF = fixRelativePart(path);
     new FileSystemLinkResolver<Void>() {
       @Override
       public Void doCall(final Path p) throws IOException,
           UnresolvedLinkException {
-        dfs.createErasureCodingZone(getPathName(p), schema, cellSize);
+        dfs.createErasureCodingZone(getPathName(p), ecPolicy);
         return null;
       }
 
@@ -2299,7 +2298,7 @@ public class DistributedFileSystem extends FileSystem {
       public Void next(final FileSystem fs, final Path p) throws IOException {
         if (fs instanceof DistributedFileSystem) {
           DistributedFileSystem myDfs = (DistributedFileSystem) fs;
-          myDfs.createErasureCodingZone(p, schema, cellSize);
+          myDfs.createErasureCodingZone(p, ecPolicy);
           return null;
         }
         throw new UnsupportedOperationException(
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
@@ -369,17 +369,13 @@ public class HdfsAdmin {
   /**
    * Create the ErasureCoding zone
    *
-   * @param path
-   *          Directory to create the ErasureCoding zone
-   * @param schema
-   *          ECSchema for the zone. If not specified default will be used.
-   * @param cellSize
-   *          Cellsize for the striped ErasureCoding
+   * @param path Directory to create the ErasureCoding zone
+   * @param ecPolicy erasure coding policy for the zone. If null, the default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
-    dfs.createErasureCodingZone(path, schema, cellSize);
+  public void createErasureCodingZone(final Path path,
+      final ErasureCodingPolicy ecPolicy) throws IOException {
+    dfs.createErasureCodingZone(path, ecPolicy);
   }
 
   /**
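A hedged sketch of the admin call under the new signature (the NameNode URI and path are illustrative; per the javadoc, passing null selects the default policy):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class CreateEcZone {
      public static void main(String[] args) throws Exception {
        HdfsAdmin admin =
            new HdfsAdmin(URI.create("hdfs://localhost:9000"), new Configuration());
        // null => default erasure coding policy
        admin.createErasureCodingZone(new Path("/striped"), null);
      }
    }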
@@ -395,12 +391,11 @@ public class HdfsAdmin {
   }
 
   /**
-   * Get the ErasureCoding schemas supported.
+   * Get the ErasureCoding policies supported.
    *
-   * @return ECSchemas
    * @throws IOException
    */
-  public ECSchema[] getECSchemas() throws IOException {
-    return dfs.getClient().getECSchemas();
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+    return dfs.getClient().getErasureCodingPolicies();
   }
 }
@@ -54,7 +54,6 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.AccessControlException;
@@ -1459,21 +1458,20 @@ public interface ClientProtocol {
   public EventBatchList getEditsFromTxid(long txid) throws IOException;
 
   /**
-   * Create an erasure coding zone with specified schema, if any, otherwise
+   * Create an erasure coding zone with specified policy, if any, otherwise
    * default
    */
   @AtMostOnce
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException;
 
   /**
-   * Gets list of ECSchemas loaded in Namenode
+   * Get the erasure coding policies loaded in Namenode
    *
-   * @return Returns the list of ECSchemas loaded at Namenode
    * @throws IOException
    */
   @Idempotent
-  public ECSchema[] getECSchemas() throws IOException;
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException;
 
   /**
    * Get the information about the EC zone for the path
@@ -16,21 +16,17 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
 /**
  * Information about the EC Zone at the specified path.
  */
 public class ErasureCodingZone {
 
   private String dir;
-  private ECSchema schema;
-  private int cellSize;
+  private ErasureCodingPolicy ecPolicy;
 
-  public ErasureCodingZone(String dir, ECSchema schema, int cellSize) {
+  public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) {
     this.dir = dir;
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -43,24 +39,16 @@ public class ErasureCodingZone {
   }
 
   /**
-   * Get the schema for the EC Zone
+   * Get the erasure coding policy for the EC Zone
    *
    * @return
    */
-  public ECSchema getSchema() {
-    return schema;
-  }
-
-  /**
-   * Get cellSize for the EC Zone
-   */
-  public int getCellSize() {
-    return cellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   @Override
   public String toString() {
-    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
-        + cellSize;
+    return "Dir: " + getDir() + ", Policy: " + ecPolicy;
   }
 }
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Interface that represents the over the wire information
@@ -60,10 +59,10 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
       long access_time, FsPermission permission, String owner, String group,
       byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
       int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
-      ECSchema schema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     super(length, isdir, block_replication, blocksize, modification_time,
         access_time, permission, owner, group, symlink, path, fileId,
-        childrenNum, feInfo, storagePolicy, schema, stripeCellSize);
+        childrenNum, feInfo, storagePolicy, ecPolicy);
     this.locations = locations;
   }
 
@@ -199,8 +199,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -220,7 +220,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
@@ -1403,10 +1403,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, CreateErasureCodingZoneRequestProto req)
       throws ServiceException {
     try {
-      ECSchema schema = req.hasSchema() ? PBHelper.convertECSchema(req
-          .getSchema()) : null;
-      int cellSize = req.hasCellSize() ? req.getCellSize() : 0;
-      server.createErasureCodingZone(req.getSrc(), schema, cellSize);
+      ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req
+          .getEcPolicy()) : null;
+      server.createErasureCodingZone(req.getSrc(), ecPolicy);
       return CreateErasureCodingZoneResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1522,14 +1521,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetECSchemasResponseProto getECSchemas(RpcController controller,
-      GetECSchemasRequestProto request) throws ServiceException {
+  public GetErasureCodingPoliciesResponseProto getErasureCodingPolicies(RpcController controller,
+      GetErasureCodingPoliciesRequestProto request) throws ServiceException {
     try {
-      ECSchema[] ecSchemas = server.getECSchemas();
-      GetECSchemasResponseProto.Builder resBuilder = GetECSchemasResponseProto
+      ErasureCodingPolicy[] ecPolicies = server.getErasureCodingPolicies();
+      GetErasureCodingPoliciesResponseProto.Builder resBuilder = GetErasureCodingPoliciesResponseProto
           .newBuilder();
-      for (ECSchema ecSchema : ecSchemas) {
-        resBuilder.addSchemas(PBHelper.convertECSchema(ecSchema));
+      for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+        resBuilder.addEcPolicies(PBHelper.convertErasureCodingPolicy(ecPolicy));
       }
       return resBuilder.build();
     } catch (IOException e) {
@@ -165,12 +165,12 @@ import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -182,7 +182,7 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
@@ -240,8 +240,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_GET_STORAGE_POLICIES_REQUEST =
       GetStoragePoliciesRequestProto.newBuilder().build();
 
-  private final static GetECSchemasRequestProto
-  VOID_GET_ECSCHEMAS_REQUEST = GetECSchemasRequestProto
+  private final static GetErasureCodingPoliciesRequestProto
+  VOID_GET_EC_POLICIES_REQUEST = GetErasureCodingPoliciesRequestProto
       .newBuilder().build();
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
@@ -1419,16 +1419,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     final CreateErasureCodingZoneRequestProto.Builder builder =
         CreateErasureCodingZoneRequestProto.newBuilder();
     builder.setSrc(src);
-    if (schema != null) {
-      builder.setSchema(PBHelper.convertECSchema(schema));
-    }
-    if (cellSize > 0) {
-      builder.setCellSize(cellSize);
+    if (ecPolicy != null) {
+      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
     }
     CreateErasureCodingZoneRequestProto req = builder.build();
     try {
@@ -1550,16 +1547,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     try {
-      GetECSchemasResponseProto response = rpcProxy.getECSchemas(null,
-          VOID_GET_ECSCHEMAS_REQUEST);
-      ECSchema[] schemas = new ECSchema[response.getSchemasCount()];
+      GetErasureCodingPoliciesResponseProto response = rpcProxy
+          .getErasureCodingPolicies(null, VOID_GET_EC_POLICIES_REQUEST);
+      ErasureCodingPolicy[] ecPolicies =
+          new ErasureCodingPolicy[response.getEcPoliciesCount()];
       int i = 0;
-      for (ECSchemaProto schemaProto : response.getSchemasList()) {
-        schemas[i++] = PBHelper.convertECSchema(schemaProto);
+      for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
+        ecPolicies[i++] = PBHelper.convertErasureCodingPolicy(ecPolicyProto);
       }
-      return schemas;
+      return ecPolicies;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -137,6 +138,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
@@ -1348,8 +1350,7 @@ public class PBHelper {
         PBHelper.convertLocatedBlockProto(lb.getLastBlock()) : null,
         lb.getIsLastBlockComplete(),
         lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : null,
-        lb.hasECSchema() ? convertECSchema(lb.getECSchema()) : null,
-        lb.hasStripeCellSize() ? lb.getStripeCellSize() : 0);
+        lb.hasEcPolicy() ? convertErasureCodingPolicy(lb.getEcPolicy()) : null);
   }
 
   public static LocatedBlocksProto convert(LocatedBlocks lb) {
@@ -1365,11 +1366,8 @@ public class PBHelper {
     if (lb.getFileEncryptionInfo() != null) {
       builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
     }
-    if (lb.getECSchema() != null) {
-      builder.setECSchema(convertECSchema(lb.getECSchema()));
-    }
-    if (lb.getStripeCellSize() != 0) {
-      builder.setStripeCellSize(lb.getStripeCellSize());
+    if (lb.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(convertErasureCodingPolicy(lb.getErasureCodingPolicy()));
     }
     return builder.setFileLength(lb.getFileLength())
         .setUnderConstruction(lb.isUnderConstruction())
@@ -1514,8 +1512,7 @@ public class PBHelper {
         fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
         fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
             : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-        fs.hasEcSchema() ? PBHelper.convertECSchema(fs.getEcSchema()) : null,
-        fs.hasStripeCellSize() ? fs.getStripeCellSize() : 0);
+        fs.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(fs.getEcPolicy()) : null);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1576,10 +1573,9 @@ public class PBHelper {
         builder.setLocations(PBHelper.convert(locations));
       }
     }
-    if(fs.getECSchema() != null) {
-      builder.setEcSchema(PBHelper.convertECSchema(fs.getECSchema()));
+    if(fs.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(fs.getErasureCodingPolicy()));
     }
-    builder.setStripeCellSize(fs.getStripeCellSize());
     return builder.build();
   }
 
@@ -3137,13 +3133,12 @@ public class PBHelper {
     for (ECSchemaOptionEntryProto option : optionsList) {
       options.put(option.getKey(), option.getValue());
     }
-    return new ECSchema(schema.getSchemaName(), schema.getCodecName(),
-        schema.getDataUnits(), schema.getParityUnits(), options);
+    return new ECSchema(schema.getCodecName(), schema.getDataUnits(),
+        schema.getParityUnits(), options);
   }
 
   public static ECSchemaProto convertECSchema(ECSchema schema) {
     ECSchemaProto.Builder builder = ECSchemaProto.newBuilder()
-        .setSchemaName(schema.getSchemaName())
         .setCodecName(schema.getCodecName())
         .setDataUnits(schema.getNumDataUnits())
         .setParityUnits(schema.getNumParityUnits());
@@ -3155,17 +3150,34 @@ public class PBHelper {
     return builder.build();
   }
 
+  public static ErasureCodingPolicy convertErasureCodingPolicy(
+      ErasureCodingPolicyProto policy) {
+    return new ErasureCodingPolicy(policy.getName(),
+        convertECSchema(policy.getSchema()),
+        policy.getCellSize());
+  }
+
+  public static ErasureCodingPolicyProto convertErasureCodingPolicy(
+      ErasureCodingPolicy policy) {
+    ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
+        .newBuilder()
+        .setName(policy.getName())
+        .setSchema(convertECSchema(policy.getSchema()))
+        .setCellSize(policy.getCellSize());
+    return builder.build();
+  }
+
   public static ErasureCodingZoneProto convertErasureCodingZone(
       ErasureCodingZone ecZone) {
     return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
-        .setSchema(convertECSchema(ecZone.getSchema()))
-        .setCellSize(ecZone.getCellSize()).build();
+        .setEcPolicy(convertErasureCodingPolicy(ecZone.getErasureCodingPolicy()))
+        .build();
   }
 
   public static ErasureCodingZone convertErasureCodingZone(
       ErasureCodingZoneProto ecZoneProto) {
     return new ErasureCodingZone(ecZoneProto.getDir(),
-        convertECSchema(ecZoneProto.getSchema()), ecZoneProto.getCellSize());
+        convertErasureCodingPolicy(ecZoneProto.getEcPolicy()));
   }
 
   public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
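With both directions now in PBHelper, conversion should round-trip cleanly; a hedged sketch (the policy values are illustrative):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class PolicyRoundTrip {
      public static void main(String[] args) {
        ErasureCodingPolicy policy = new ErasureCodingPolicy(
            "RS-6-3-64k", new ECSchema("rs", 6, 3), 64 * 1024);
        ErasureCodingPolicyProto proto = PBHelper.convertErasureCodingPolicy(policy);
        ErasureCodingPolicy back = PBHelper.convertErasureCodingPolicy(proto);
        // equals() compares name, cellSize, and schema, so this should hold.
        System.out.println(policy.equals(back));  // true
      }
    }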
@@ -3198,12 +3210,11 @@ public class PBHelper {
       liveBlkIndices[i] = liveBlockIndicesList.get(i).shortValue();
     }
 
-    ECSchema ecSchema = convertECSchema(blockEcRecoveryInfoProto.getEcSchema());
-    int cellSize = blockEcRecoveryInfoProto.getCellSize();
+    ErasureCodingPolicy ecPolicy =
+        convertErasureCodingPolicy(blockEcRecoveryInfoProto.getEcPolicy());
 
     return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos,
-        targetStorageUuids, convertStorageTypes, liveBlkIndices, ecSchema,
-        cellSize);
+        targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
   }
 
   public static BlockECRecoveryInfoProto convertBlockECRecoveryInfo(
@@ -3228,8 +3239,8 @@ public class PBHelper {
     short[] liveBlockIndices = blockEcRecoveryInfo.getLiveBlockIndices();
     builder.addAllLiveBlockIndices(convertIntArray(liveBlockIndices));
 
-    builder.setEcSchema(convertECSchema(blockEcRecoveryInfo.getECSchema()));
-    builder.setCellSize(blockEcRecoveryInfo.getCellSize());
+    builder.setEcPolicy(convertErasureCodingPolicy(blockEcRecoveryInfo
+        .getErasureCodingPolicy()));
 
     return builder.build();
   }
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 
@@ -38,8 +38,7 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
-  private final ECSchema schema;
-  private final int cellSize;
+  private final ErasureCodingPolicy ecPolicy;
   /**
    * Always the same size with triplets. Record the block index for each triplet
    * TODO: actually this is only necessary for over-replicated block. Thus can
@@ -47,36 +46,34 @@ public class BlockInfoStriped extends BlockInfo {
    */
   private byte[] indices;
 
-  public BlockInfoStriped(Block blk, ECSchema schema, int cellSize) {
-    super(blk, (short) (schema.getNumDataUnits() + schema.getNumParityUnits()));
-    indices = new byte[schema.getNumDataUnits() + schema.getNumParityUnits()];
+  public BlockInfoStriped(Block blk, ErasureCodingPolicy ecPolicy) {
+    super(blk, (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()));
+    indices = new byte[ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()];
     initIndices();
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   BlockInfoStriped(BlockInfoStriped b) {
-    this(b, b.getSchema(), b.getCellSize());
+    this(b, b.getErasureCodingPolicy());
     this.setBlockCollection(b.getBlockCollection());
   }
 
   public short getTotalBlockNum() {
-    return (short) (this.schema.getNumDataUnits()
-        + this.schema.getNumParityUnits());
+    return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
   }
 
   public short getDataBlockNum() {
-    return (short) this.schema.getNumDataUnits();
+    return (short) ecPolicy.getNumDataUnits();
   }
 
   public short getParityBlockNum() {
-    return (short) this.schema.getNumParityUnits();
+    return (short) ecPolicy.getNumParityUnits();
   }
 
   /**
    * If the block is committed/completed and its length is less than a full
    * stripe, it returns the number of actual data blocks.
-   * Otherwise it returns the number of data units specified by schema.
+   * Otherwise it returns the number of data units specified by erasure coding policy.
    */
   public short getRealDataBlockNum() {
     if (isComplete() || getBlockUCState() == BlockUCState.COMMITTED) {
@ -91,12 +88,8 @@ public class BlockInfoStriped extends BlockInfo {
|
|||
return (short) (getRealDataBlockNum() + getParityBlockNum());
|
||||
}
|
||||
|
||||
public ECSchema getSchema() {
|
||||
return schema;
|
||||
}
|
||||
|
||||
public int getCellSize() {
|
||||
return cellSize;
|
||||
public ErasureCodingPolicy getErasureCodingPolicy() {
|
||||
return ecPolicy;
|
||||
}
|
||||
|
||||
private void initIndices() {
|
||||
|
@ -230,7 +223,7 @@ public class BlockInfoStriped extends BlockInfo {
|
|||
// be the total of data blocks and parity blocks because
|
||||
// `getNumBytes` is the total of actual data block size.
|
||||
return StripedBlockUtil.spaceConsumedByStripedBlock(getNumBytes(),
|
||||
this.schema.getNumDataUnits(), this.schema.getNumParityUnits(),
|
||||
ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits(),
|
||||
BLOCK_STRIPED_CELL_SIZE);
|
||||
}
|
||||
|
||||
|
@ -260,7 +253,7 @@ public class BlockInfoStriped extends BlockInfo {
|
|||
BlockUCState s, DatanodeStorageInfo[] targets) {
|
||||
final BlockInfoUnderConstructionStriped ucBlock;
|
||||
if(isComplete()) {
|
||||
ucBlock = new BlockInfoUnderConstructionStriped(this, schema, cellSize,
|
||||
ucBlock = new BlockInfoUnderConstructionStriped(this, ecPolicy,
|
||||
s, targets);
|
||||
ucBlock.setBlockCollection(getBlockCollection());
|
||||
} else {
|
||||
|
|
|
@@ -21,7 +21,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import java.io.IOException;

@@ -57,17 +57,16 @@ public class BlockInfoUnderConstructionStriped extends BlockInfoStriped
   /**
    * Constructor with null storage targets.
    */
-  public BlockInfoUnderConstructionStriped(Block blk, ECSchema schema,
-      int cellSize) {
-    this(blk, schema, cellSize, UNDER_CONSTRUCTION, null);
+  public BlockInfoUnderConstructionStriped(Block blk, ErasureCodingPolicy ecPolicy) {
+    this(blk, ecPolicy, UNDER_CONSTRUCTION, null);
   }

   /**
    * Create a striped block that is currently being constructed.
    */
-  public BlockInfoUnderConstructionStriped(Block blk, ECSchema schema,
-      int cellSize, BlockUCState state, DatanodeStorageInfo[] targets) {
-    super(blk, schema, cellSize);
+  public BlockInfoUnderConstructionStriped(Block blk, ErasureCodingPolicy ecPolicy,
+      BlockUCState state, DatanodeStorageInfo[] targets) {
+    super(blk, ecPolicy);
     assert getBlockUCState() != COMPLETE :
       "BlockInfoUnderConstructionStriped cannot be in COMPLETE state";
     this.blockUCState = state;
@@ -85,7 +85,7 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;

@@ -948,14 +948,13 @@ public class BlockManager {
       ErasureCodingZone ecZone)
       throws IOException {
     assert namesystem.hasReadLock();
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy = ecZone != null ? ecZone
+        .getErasureCodingPolicy() : null;
     if (blocks == null) {
       return null;
     } else if (blocks.length == 0) {
       return new LocatedBlocks(0, isFileUnderConstruction,
-          Collections.<LocatedBlock> emptyList(), null, false, feInfo, schema,
-          cellSize);
+          Collections.<LocatedBlock> emptyList(), null, false, feInfo, ecPolicy);
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug("blocks = " + java.util.Arrays.asList(blocks));

@@ -980,7 +979,7 @@ public class BlockManager {
       }
       return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
           isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
-          schema, cellSize);
+          ecPolicy);
     }
   }

@@ -1597,7 +1596,7 @@ public class BlockManager {
           .warn("Failed to get the EC zone for the file {} ", src);
     }
     if (ecZone == null) {
-      blockLog.warn("No EC schema found for the file {}. "
+      blockLog.warn("No erasure coding policy found for the file {}. "
           + "So cannot proceed for recovery", src);
       // TODO: we may have to revisit later for what we can do better to
       // handle this case.

@@ -1607,7 +1606,7 @@ public class BlockManager {
           new ExtendedBlock(namesystem.getBlockPoolId(), block),
           rw.srcNodes, rw.targets,
           ((ErasureCodingWork) rw).liveBlockIndicies,
-          ecZone.getSchema(), ecZone.getCellSize());
+          ecZone.getErasureCodingPolicy());
     } else {
       rw.srcNodes[0].addBlockToBeReplicated(block, targets);
     }
@@ -50,7 +50,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;

@@ -610,10 +610,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   void addBlockToBeErasureCoded(ExtendedBlock block,
       DatanodeDescriptor[] sources, DatanodeStorageInfo[] targets,
-      short[] liveBlockIndices, ECSchema ecSchema, int cellSize) {
+      short[] liveBlockIndices, ErasureCodingPolicy ecPolicy) {
     assert (block != null && sources != null && sources.length > 0);
     BlockECRecoveryInfo task = new BlockECRecoveryInfo(block, sources, targets,
-        liveBlockIndices, ecSchema, cellSize);
+        liveBlockIndices, ecPolicy);
     erasurecodeBlocks.offer(task);
     BlockManager.LOG.debug("Adding block recovery task " + task + "to "
         + getName() + ", current queue size is " + erasurecodeBlocks.size());
@@ -54,7 +54,7 @@ public class StorageLocation {
       // drop any (illegal) authority in the URI for backwards compatibility
       this.file = new File(uri.getPath());
     } else {
-      throw new IllegalArgumentException("Unsupported URI schema in " + uri);
+      throw new IllegalArgumentException("Unsupported URI scheme in " + uri);
     }
   }

@@ -69,8 +69,7 @@ import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResult;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;

@@ -267,10 +266,10 @@ public final class ErasureCodingWorker {
         new ExecutorCompletionService<>(STRIPED_READ_THREAD_POOL);

     ReconstructAndTransferBlock(BlockECRecoveryInfo recoveryInfo) {
-      ECSchema schema = recoveryInfo.getECSchema();
-      dataBlkNum = schema.getNumDataUnits();
-      parityBlkNum = schema.getNumParityUnits();
-      cellSize = recoveryInfo.getCellSize();
+      ErasureCodingPolicy ecPolicy = recoveryInfo.getErasureCodingPolicy();
+      dataBlkNum = ecPolicy.getNumDataUnits();
+      parityBlkNum = ecPolicy.getNumParityUnits();
+      cellSize = ecPolicy.getCellSize();

       blockGroup = recoveryInfo.getExtendedBlock();
       final int cellsNum = (int)((blockGroup.getNumBytes() - 1) / cellSize + 1);
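The cellsNum expression in the constructor above is a ceiling division done entirely in integer arithmetic. A self-contained sketch of the same computation (the class and method names here are illustrative only, not from the Hadoop sources):

public class CellCount {
  // Same rounding trick as ReconstructAndTransferBlock uses:
  // ceil(numBytes / cellSize) without floating point; valid for numBytes >= 1.
  static int cellsNum(long numBytes, int cellSize) {
    return (int) ((numBytes - 1) / cellSize + 1);
  }

  public static void main(String[] args) {
    // With the 64 KB default cell size, 100 KB of data spans two cells...
    System.out.println(cellsNum(100 * 1024, 64 * 1024)); // prints 2
    // ...and an exact multiple does not round up: 128 KB is exactly two cells.
    System.out.println(cellsNum(128 * 1024, 64 * 1024)); // prints 2
  }
}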
@@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;

@@ -177,7 +177,7 @@ public class Mover {
     }

     DBlock newDBlock(LocatedBlock lb, List<MLocation> locations,
-        ECSchema ecSchema) {
+        ErasureCodingPolicy ecPolicy) {
       Block blk = lb.getBlock().getLocalBlock();
       DBlock db;
       if (lb.isStriped()) {

@@ -186,7 +186,7 @@ public class Mover {
         for (int i = 0; i < indices.length; i++) {
           indices[i] = (byte) lsb.getBlockIndices()[i];
         }
-        db = new DBlockStriped(blk, indices, (short) ecSchema.getNumDataUnits());
+        db = new DBlockStriped(blk, indices, (short) ecPolicy.getNumDataUnits());
       } else {
         db = new DBlock(blk);
       }

@@ -373,7 +373,7 @@ public class Mover {
       List<StorageType> types = policy.chooseStorageTypes(
           status.getReplication());

-      final ECSchema ecSchema = status.getECSchema();
+      final ErasureCodingPolicy ecPolicy = status.getErasureCodingPolicy();
       final LocatedBlocks locatedBlocks = status.getBlockLocations();
       boolean hasRemaining = false;
       final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();

@@ -390,7 +390,7 @@ public class Mover {
           final StorageTypeDiff diff = new StorageTypeDiff(types,
               lb.getStorageTypes());
           if (!diff.removeOverlap(true)) {
-            if (scheduleMoves4Block(diff, lb, ecSchema)) {
+            if (scheduleMoves4Block(diff, lb, ecPolicy)) {
               hasRemaining |= (diff.existing.size() > 1 &&
                   diff.expected.size() > 1);
             }

@@ -400,12 +400,12 @@ public class Mover {
     }

     boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb,
-        ECSchema ecSchema) {
+        ErasureCodingPolicy ecPolicy) {
       final List<MLocation> locations = MLocation.toLocations(lb);
       if (!(lb instanceof LocatedStripedBlock)) {
         Collections.shuffle(locations);
       }
-      final DBlock db = newDBlock(lb, locations, ecSchema);
+      final DBlock db = newDBlock(lb, locations, ecPolicy);

       for (final StorageType t : diff.existing) {
         for (final MLocation ml : locations) {
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * This manages erasure coding policies predefined and activated in the system.
+ * It loads customized policies and syncs with persisted ones in
+ * NameNode image.
+ *
+ * This class is instantiated by the FSNamesystem.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public final class ErasureCodingPolicyManager {
+
+  /**
+   * TODO: HDFS-8095
+   */
+  private static final int DEFAULT_DATA_BLOCKS = 6;
+  private static final int DEFAULT_PARITY_BLOCKS = 3;
+  private static final int DEFAULT_CELLSIZE = 64 * 1024;
+  private static final String DEFAULT_CODEC_NAME = "rs";
+  private static final String DEFAULT_POLICY_NAME = "RS-6-3-64k";
+  private static final ECSchema SYS_DEFAULT_SCHEMA = new ECSchema(
+      DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
+  private static final ErasureCodingPolicy SYS_DEFAULT_POLICY =
+      new ErasureCodingPolicy(DEFAULT_POLICY_NAME, SYS_DEFAULT_SCHEMA,
+          DEFAULT_CELLSIZE);
+
+  // We may add more later.
+  private static ErasureCodingPolicy[] SYS_POLICY = new ErasureCodingPolicy[] {
+      SYS_DEFAULT_POLICY
+  };
+
+  /**
+   * All active policies maintained in NN memory for fast querying,
+   * identified and sorted by name.
+   */
+  private final Map<String, ErasureCodingPolicy> activePolicies;
+
+  ErasureCodingPolicyManager() {
+
+    this.activePolicies = new TreeMap<>();
+    for (ErasureCodingPolicy policy : SYS_POLICY) {
+      activePolicies.put(policy.getName(), policy);
+    }
+
+    /**
+     * TODO: HDFS-7859 persist into NameNode
+     * load persistent policies from image and editlog, which is done only once
+     * during NameNode startup. This can be done here or in a separate method.
+     */
+  }
+
+  /**
+   * Get system defined policies.
+   * @return system policies
+   */
+  public static ErasureCodingPolicy[] getSystemPolices() {
+    return SYS_POLICY;
+  }
+
+  /**
+   * Get system-wide default policy, which can be used by default
+   * when no policy is specified for an EC zone.
+   * @return ecPolicy
+   */
+  public static ErasureCodingPolicy getSystemDefaultPolicy() {
+    return SYS_DEFAULT_POLICY;
+  }
+
+  /**
+   * Get all policies that are available to use.
+   * @return all policies
+   */
+  public ErasureCodingPolicy[] getPolicies() {
+    ErasureCodingPolicy[] results = new ErasureCodingPolicy[activePolicies.size()];
+    return activePolicies.values().toArray(results);
+  }
+
+  /**
+   * Get the policy specified by the policy name.
+   */
+  public ErasureCodingPolicy getPolicy(String name) {
+    return activePolicies.get(name);
+  }
+
+  /**
+   * Clear and clean up.
+   */
+  public void clear() {
+    activePolicies.clear();
+  }
+}
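A short sketch of how the new manager is meant to be queried. The constructor is package-private, so only code in org.apache.hadoop.hdfs.server.namenode (the FSNamesystem in practice) can instantiate it; the example class below is hypothetical and placed in that package purely for illustration:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative only: exercises the ErasureCodingPolicyManager API added above.
public class ECPolicyManagerExample {
  public static void main(String[] args) {
    ErasureCodingPolicyManager manager = new ErasureCodingPolicyManager();
    // The single built-in policy so far: RS-6-3-64k, i.e. the "rs" codec with
    // 6 data units, 3 parity units, and 64 KB cells.
    ErasureCodingPolicy byName = manager.getPolicy("RS-6-3-64k");
    ErasureCodingPolicy defaultPolicy =
        ErasureCodingPolicyManager.getSystemDefaultPolicy();
    // Both lookups resolve to the same system default instance.
    assert byName == defaultPolicy;
    System.out.println(defaultPolicy.getName());
  }
}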
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * This manages EC schemas predefined and activated in the system.
- * It loads customized schemas and syncs with persisted ones in
- * NameNode image.
- *
- * This class is instantiated by the FSNamesystem.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class ErasureCodingSchemaManager {
-
-  /**
-   * TODO: HDFS-8095
-   */
-  private static final int DEFAULT_DATA_BLOCKS = 6;
-  private static final int DEFAULT_PARITY_BLOCKS = 3;
-  private static final String DEFAULT_CODEC_NAME = "rs";
-  private static final String DEFAULT_SCHEMA_NAME = "RS-6-3";
-  private static final ECSchema SYS_DEFAULT_SCHEMA =
-      new ECSchema(DEFAULT_SCHEMA_NAME,
-          DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
-
-  //We may add more later.
-  private static ECSchema[] SYS_SCHEMAS = new ECSchema[] {
-      SYS_DEFAULT_SCHEMA
-  };
-
-  /**
-   * All active EC activeSchemas maintained in NN memory for fast querying,
-   * identified and sorted by its name.
-   */
-  private final Map<String, ECSchema> activeSchemas;
-
-  ErasureCodingSchemaManager() {
-
-    this.activeSchemas = new TreeMap<String, ECSchema>();
-    for (ECSchema schema : SYS_SCHEMAS) {
-      activeSchemas.put(schema.getSchemaName(), schema);
-    }
-
-    /**
-     * TODO: HDFS-7859 persist into NameNode
-     * load persistent schemas from image and editlog, which is done only once
-     * during NameNode startup. This can be done here or in a separate method.
-     */
-  }
-
-  /**
-   * Get system defined schemas.
-   * @return system schemas
-   */
-  public static ECSchema[] getSystemSchemas() {
-    return SYS_SCHEMAS;
-  }
-
-  /**
-   * Get system-wide default EC schema, which can be used by default when no
-   * schema is specified for an EC zone.
-   * @return schema
-   */
-  public static ECSchema getSystemDefaultSchema() {
-    return SYS_DEFAULT_SCHEMA;
-  }
-
-  /**
-   * Tell the specified schema is the system default one or not.
-   * @param schema
-   * @return true if it's the default false otherwise
-   */
-  public static boolean isSystemDefault(ECSchema schema) {
-    if (schema == null) {
-      throw new IllegalArgumentException("Invalid schema parameter");
-    }
-
-    // schema name is the identifier.
-    return SYS_DEFAULT_SCHEMA.getSchemaName().equals(schema.getSchemaName());
-  }
-
-  /**
-   * Get all EC schemas that's available to use.
-   * @return all EC schemas
-   */
-  public ECSchema[] getSchemas() {
-    ECSchema[] results = new ECSchema[activeSchemas.size()];
-    return activeSchemas.values().toArray(results);
-  }
-
-  /**
-   * Get the EC schema specified by the schema name.
-   * @param schemaName
-   * @return EC schema specified by the schema name
-   */
-  public ECSchema getSchema(String schemaName) {
-    return activeSchemas.get(schemaName);
-  }
-
-  /**
-   * Clear and clean up
-   */
-  public void clear() {
-    activeSchemas.clear();
-  }
-}
@@ -23,11 +23,10 @@ import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;

@@ -60,9 +59,9 @@ public class ErasureCodingZoneManager {
     this.dir = dir;
   }

-  ECSchema getErasureCodingSchema(INodesInPath iip) throws IOException {
+  ErasureCodingPolicy getErasureCodingPolicy(INodesInPath iip) throws IOException {
     ErasureCodingZone ecZone = getErasureCodingZone(iip);
-    return ecZone == null ? null : ecZone.getSchema();
+    return ecZone == null ? null : ecZone.getErasureCodingPolicy();
   }

   ErasureCodingZone getErasureCodingZone(INodesInPath iip) throws IOException {

@@ -88,12 +87,11 @@ public class ErasureCodingZoneManager {
       if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
         ByteArrayInputStream bIn=new ByteArrayInputStream(xAttr.getValue());
         DataInputStream dIn=new DataInputStream(bIn);
-        int cellSize = WritableUtils.readVInt(dIn);
-        String schemaName = WritableUtils.readString(dIn);
-        ECSchema schema = dir.getFSNamesystem()
-            .getErasureCodingSchemaManager().getSchema(schemaName);
+        String ecPolicyName = WritableUtils.readString(dIn);
+        ErasureCodingPolicy ecPolicy = dir.getFSNamesystem()
+            .getErasureCodingPolicyManager().getPolicy(ecPolicyName);
         return new ErasureCodingZone(dir.getInode(inode.getId())
-            .getFullPathName(), schema, cellSize);
+            .getFullPathName(), ecPolicy);
       }
     }
   }

@@ -101,7 +99,7 @@ public class ErasureCodingZoneManager {
   }

   List<XAttr> createErasureCodingZone(final INodesInPath srcIIP,
-      ECSchema schema, int cellSize) throws IOException {
+      ErasureCodingPolicy ecPolicy) throws IOException {
     assert dir.hasWriteLock();
     Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
     String src = srcIIP.getPath();

@@ -115,29 +113,22 @@ public class ErasureCodingZoneManager {
       throw new IOException("Attempt to create an erasure coding zone " +
          "for a file " + src);
     }
-    if (getErasureCodingSchema(srcIIP) != null) {
+    if (getErasureCodingPolicy(srcIIP) != null) {
       throw new IOException("Directory " + src + " is already in an " +
          "erasure coding zone.");
     }

-    // System default schema will be used since no specified.
-    if (schema == null) {
-      schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+    // The system default erasure coding policy will be used since none is specified.
+    if (ecPolicy == null) {
+      ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
     }

-    if (cellSize <= 0) {
-      cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-    }
-
-    // Write the cellsize first and then schema name
     final XAttr ecXAttr;
     DataOutputStream dOut = null;
     try {
       ByteArrayOutputStream bOut = new ByteArrayOutputStream();
       dOut = new DataOutputStream(bOut);
-      WritableUtils.writeVInt(dOut, cellSize);
-      // Now persist the schema name in xattr
-      WritableUtils.writeString(dOut, schema.getSchemaName());
+      WritableUtils.writeString(dOut, ecPolicy.getName());
       ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE,
           bOut.toByteArray());
     } finally {

@@ -158,10 +149,12 @@ public class ErasureCodingZoneManager {
     if (srcZone != null && srcZone.getDir().equals(src) && dstZone == null) {
       return;
     }
-    final ECSchema srcSchema = (srcZone != null) ? srcZone.getSchema() : null;
-    final ECSchema dstSchema = (dstZone != null) ? dstZone.getSchema() : null;
-    if ((srcSchema != null && !srcSchema.equals(dstSchema)) ||
-        (dstSchema != null && !dstSchema.equals(srcSchema))) {
+    final ErasureCodingPolicy srcECPolicy =
+        srcZone != null ? srcZone.getErasureCodingPolicy() : null;
+    final ErasureCodingPolicy dstECPolicy =
+        dstZone != null ? dstZone.getErasureCodingPolicy() : null;
+    if (srcECPolicy != null && !srcECPolicy.equals(dstECPolicy) ||
+        dstECPolicy != null && !dstECPolicy.equals(srcECPolicy)) {
       throw new IOException(
           src + " can't be moved because the source and destination have " +
          "different erasure coding policies.");
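The zone xattr payload shrinks here from (vint cellSize, schema name) to just the policy name, since the cell size now travels inside the policy object. A self-contained sketch of the new round trip, using the same WritableUtils calls as the code above (the wrapper class is hypothetical):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

// Sketch of the new zone-xattr payload: only the policy name is persisted;
// the NameNode resolves it back through ErasureCodingPolicyManager.getPolicy().
public class ZoneXAttrRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bOut = new ByteArrayOutputStream();
    DataOutputStream dOut = new DataOutputStream(bOut);
    WritableUtils.writeString(dOut, "RS-6-3-64k"); // what createErasureCodingZone writes
    byte[] xattrValue = bOut.toByteArray();

    DataInputStream dIn =
        new DataInputStream(new ByteArrayInputStream(xattrValue));
    String ecPolicyName = WritableUtils.readString(dIn); // what getErasureCodingZone reads
    System.out.println(ecPolicyName); // RS-6-3-64k
  }
}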
@@ -22,9 +22,9 @@ import java.util.List;

 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 /**
  * Helper class to perform erasure coding related operations.

@@ -43,15 +43,14 @@ final class FSDirErasureCodingOp {
    * @param fsn namespace
    * @param srcArg the path of a directory which will be the root of the
    *          erasure coding zone. The directory must be empty.
-   * @param schema ECSchema for the erasure coding zone
-   * @param cellSize Cell size of stripe
+   * @param ecPolicy erasure coding policy for the erasure coding zone
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
    * @return {@link HdfsFileStatus}
    * @throws IOException
    */
   static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn,
-      final String srcArg, final ECSchema schema, final int cellSize,
+      final String srcArg, final ErasureCodingPolicy ecPolicy,
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();

@@ -68,7 +67,7 @@ final class FSDirErasureCodingOp {
     try {
       iip = fsd.getINodesInPath4Write(src, false);
       xAttrs = fsn.getErasureCodingZoneManager().createErasureCodingZone(
-          iip, schema, cellSize);
+          iip, ecPolicy);
     } finally {
       fsd.writeUnlock();
     }

@@ -120,7 +119,7 @@ final class FSDirErasureCodingOp {
     assert fsn.hasReadLock();

     final INodesInPath iip = getINodesInPath(fsn, srcArg);
-    return getErasureCodingSchemaForPath(fsn, iip) != null;
+    return getErasureCodingPolicyForPath(fsn, iip) != null;
   }

   /**

@@ -133,49 +132,35 @@ final class FSDirErasureCodingOp {
    */
   static boolean isInErasureCodingZone(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
-    return getErasureCodingSchema(fsn, iip) != null;
+    return getErasureCodingPolicy(fsn, iip) != null;
   }

   /**
-   * Get erasure coding schema.
+   * Get the erasure coding policy.
    *
    * @param fsn namespace
    * @param iip inodes in the path containing the file
-   * @return {@link ECSchema}
+   * @return {@link ErasureCodingPolicy}
    * @throws IOException
    */
-  static ECSchema getErasureCodingSchema(final FSNamesystem fsn,
+  static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
     assert fsn.hasReadLock();

-    return getErasureCodingSchemaForPath(fsn, iip);
+    return getErasureCodingPolicyForPath(fsn, iip);
   }

   /**
-   * Get available erasure coding schemas.
+   * Get available erasure coding policies.
    *
    * @param fsn namespace
-   * @return {@link ECSchema} array
+   * @return {@link ErasureCodingPolicy} array
    */
-  static ECSchema[] getErasureCodingSchemas(final FSNamesystem fsn)
+  static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();

-    return fsn.getErasureCodingSchemaManager().getSchemas();
-  }
-
-  /**
-   * Get the ECSchema specified by the name.
-   *
-   * @param fsn namespace
-   * @param schemaName schema name
-   * @return {@link ECSchema}
-   */
-  static ECSchema getErasureCodingSchema(final FSNamesystem fsn,
-      final String schemaName) throws IOException {
-    assert fsn.hasReadLock();
-
-    return fsn.getErasureCodingSchemaManager().getSchema(schemaName);
+    return fsn.getErasureCodingPolicyManager().getPolicies();
   }

   private static INodesInPath getINodesInPath(final FSNamesystem fsn,

@@ -204,12 +189,12 @@ final class FSDirErasureCodingOp {
     }
   }

-  private static ECSchema getErasureCodingSchemaForPath(final FSNamesystem fsn,
+  private static ErasureCodingPolicy getErasureCodingPolicyForPath(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
     final FSDirectory fsd = fsn.getFSDirectory();
     fsd.readLock();
     try {
-      return fsn.getErasureCodingZoneManager().getErasureCodingSchema(iip);
+      return fsn.getErasureCodingZoneManager().getErasureCodingPolicy(iip);
     } finally {
       fsd.readUnlock();
     }
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;

@@ -40,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 import java.io.FileNotFoundException;
 import java.io.IOException;

@@ -318,7 +318,7 @@ class FSDirStatAndListingOp {
     if (fsd.getINode4DotSnapshot(srcs) != null) {
       return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     }
     return null;
   }

@@ -388,8 +388,8 @@ class FSDirStatAndListingOp {

     final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
         fsd.getFSNamesystem(), iip);
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy =
+        ecZone != null ? ecZone.getErasureCodingPolicy() : null;

     if (node.isFile()) {
       final INodeFile fileNode = node.asFile();

@@ -421,8 +421,7 @@ class FSDirStatAndListingOp {
         childrenNum,
         feInfo,
         storagePolicy,
-        schema,
-        cellSize);
+        ecPolicy);
   }

   private static INodeAttributes getINodeAttributes(

@@ -471,8 +470,8 @@ class FSDirStatAndListingOp {
     }
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy =
+        ecZone != null ? ecZone.getErasureCodingPolicy() : null;

     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,

@@ -481,8 +480,7 @@ class FSDirStatAndListingOp {
         getPermissionForFileStatus(nodeAttrs, isEncrypted),
         nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-        node.getId(), loc, childrenNum, feInfo, storagePolicy, schema,
-        cellSize);
+        node.getId(), loc, childrenNum, feInfo, storagePolicy, ecPolicy);
     // Set caching information for the located blocks.
     if (loc != null) {
       CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

@@ -51,7 +52,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;

@@ -532,16 +532,15 @@ class FSDirWriteFileOp {
     if (isStriped) {
       ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsd.getFSNamesystem(), inodesInPath);
-      ECSchema ecSchema = ecZone.getSchema();
-      short numDataUnits = (short) ecSchema.getNumDataUnits();
-      short numParityUnits = (short) ecSchema.getNumParityUnits();
+      ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
+      short numDataUnits = (short) ecPolicy.getNumDataUnits();
+      short numParityUnits = (short) ecPolicy.getNumParityUnits();
       short numLocations = (short) (numDataUnits + numParityUnits);

       // check quota limits and updated space consumed
       fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
           numLocations, true);
-      blockInfo = new BlockInfoUnderConstructionStriped(block, ecSchema,
-          ecZone.getCellSize(),
+      blockInfo = new BlockInfoUnderConstructionStriped(block, ecPolicy,
           HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
     } else {
       // check quota limits and updated space consumed
@@ -992,7 +992,7 @@ public class FSEditLogLoader {
       boolean isStriped = ecZone != null;
       if (isStriped) {
         newBlockInfo = new BlockInfoUnderConstructionStriped(newBlock,
-            ecZone.getSchema(), ecZone.getCellSize());
+            ecZone.getErasureCodingPolicy());
       } else {
         newBlockInfo = new BlockInfoUnderConstructionContiguous(newBlock,
             file.getPreferredBlockReplication());

@@ -1078,7 +1078,7 @@ public class FSEditLogLoader {
       // until several blocks in?
       if (isStriped) {
         newBI = new BlockInfoUnderConstructionStriped(newBlock,
-            ecZone.getSchema(), ecZone.getCellSize());
+            ecZone.getErasureCodingPolicy());
       } else {
         newBI = new BlockInfoUnderConstructionContiguous(newBlock,
             file.getPreferredBlockReplication());

@@ -1088,11 +1088,9 @@ public class FSEditLogLoader {
       // is only executed when loading edits written by prior
       // versions of Hadoop. Current versions always log
       // OP_ADD operations as each block is allocated.
       // TODO: ECSchema can be restored from persisted file (HDFS-7859).
       if (isStriped) {
         newBI = new BlockInfoStriped(newBlock,
-            ErasureCodingSchemaManager.getSystemDefaultSchema(),
-            ecZone.getCellSize());
+            ErasureCodingPolicyManager.getSystemDefaultPolicy());
       } else {
         newBI = new BlockInfoContiguous(newBlock,
             file.getPreferredBlockReplication());
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;

@@ -66,7 +67,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.protobuf.ByteString;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 @InterfaceAudience.Private
 public final class FSImageFormatPBINode {

@@ -327,17 +327,13 @@ public final class FSImageFormatPBINode {
       short replication = (short) f.getReplication();
       boolean isStriped = f.getIsStriped();
       LoaderContext state = parent.getLoaderContext();
-      ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+      ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();

-      if (isStriped) {
-        Preconditions.checkState(f.hasStripingCellSize());
-      }
       BlockInfo[] blocks = new BlockInfo[bp.size()];
       for (int i = 0; i < bp.size(); ++i) {
         BlockProto b = bp.get(i);
         if (isStriped) {
-          blocks[i] = new BlockInfoStriped(PBHelper.convert(b), schema,
-              (int)f.getStripingCellSize());
+          blocks[i] = new BlockInfoStriped(PBHelper.convert(b), ecPolicy);
         } else {
           blocks[i] = new BlockInfoContiguous(PBHelper.convert(b),
               replication);

@@ -373,8 +369,7 @@ public final class FSImageFormatPBINode {
         final BlockInfo ucBlk;
         if (isStriped) {
           BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
-          ucBlk = new BlockInfoUnderConstructionStriped(striped,
-              schema, (int)f.getStripingCellSize());
+          ucBlk = new BlockInfoUnderConstructionStriped(striped, ecPolicy);
         } else {
           ucBlk = new BlockInfoUnderConstructionContiguous(lastBlk,
               replication);

@@ -656,16 +651,6 @@ public final class FSImageFormatPBINode {
         }
       }

-      if (n.isStriped()) {
-        if (blocks != null && blocks.length > 0) {
-          BlockInfo firstBlock = blocks[0];
-          Preconditions.checkState(firstBlock.isStriped());
-          b.setStripingCellSize(((BlockInfoStriped)firstBlock).getCellSize());
-        } else {
-          b.setStripingCellSize(HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
-        }
-      }
-
      FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
      if (uc != null) {
        INodeSection.FileUnderConstructionFeature f =
@@ -180,6 +180,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

@@ -262,7 +263,6 @@ import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetryCache;
 import org.apache.hadoop.ipc.Server;

@@ -426,7 +426,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final BlockManager blockManager;
   private final SnapshotManager snapshotManager;
   private final CacheManager cacheManager;
-  private final ErasureCodingSchemaManager ecSchemaManager;
+  private final ErasureCodingPolicyManager ecPolicyManager;
   private final DatanodeStatistics datanodeStatistics;

   private String nameserviceId;

@@ -606,7 +606,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       leaseManager.removeAllLeases();
       snapshotManager.clearSnapshottableDirs();
       cacheManager.clear();
-      ecSchemaManager.clear();
+      ecPolicyManager.clear();
       setImageLoaded(false);
       blockManager.clear();
   }

@@ -846,7 +846,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       this.dir = new FSDirectory(this, conf);
       this.snapshotManager = new SnapshotManager(dir);
       this.cacheManager = new CacheManager(this, conf, blockManager);
-      this.ecSchemaManager = new ErasureCodingSchemaManager();
+      this.ecPolicyManager = new ErasureCodingPolicyManager();
       this.safeMode = new SafeModeInfo(conf);
       this.topConf = new TopConf(conf);
       this.auditLoggers = initAuditLoggers(conf);

@@ -3679,16 +3679,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (fileINode.isStriped()) {
       final ErasureCodingZone ecZone = FSDirErasureCodingOp
           .getErasureCodingZone(this, iip);
-      final ECSchema ecSchema = ecZone.getSchema();
-      final short numDataUnits = (short) ecSchema.getNumDataUnits();
-      final short numParityUnits = (short) ecSchema.getNumParityUnits();
+      final ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
+      final short numDataUnits = (short) ecPolicy.getNumDataUnits();
+      final short numParityUnits = (short) ecPolicy.getNumParityUnits();

       final long numBlocks = numDataUnits + numParityUnits;
       final long fullBlockGroupSize =
           fileINode.getPreferredBlockSize() * numBlocks;

       final BlockInfoStriped striped = new BlockInfoStriped(commitBlock,
-          ecSchema, ecZone.getCellSize());
+          ecPolicy);
       final long actualBlockGroupSize = striped.spaceConsumed();

       diff = fullBlockGroupSize - actualBlockGroupSize;

@@ -6676,9 +6676,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return cacheManager;
   }

-  /** @return the ErasureCodingSchemaManager. */
-  public ErasureCodingSchemaManager getErasureCodingSchemaManager() {
-    return ecSchemaManager;
+  /** @return the ErasureCodingPolicyManager. */
+  public ErasureCodingPolicyManager getErasureCodingPolicyManager() {
+    return ecPolicyManager;
   }

   /** @return the ErasureCodingZoneManager. */

@@ -7581,14 +7581,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   * Create an erasure coding zone on directory src.
   * @param srcArg the path of a directory which will be the root of the
   *          erasure coding zone. The directory must be empty.
-   * @param schema ECSchema for the erasure coding zone
-   * @param cellSize Cell size of stripe
+   * @param ecPolicy erasure coding policy for the erasure coding zone
   * @throws AccessControlException if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   * @throws SafeModeException if the Namenode is in safe mode.
   */
-  void createErasureCodingZone(final String srcArg, final ECSchema schema,
-      int cellSize, final boolean logRetryCache) throws IOException,
+  void createErasureCodingZone(final String srcArg, final ErasureCodingPolicy
+      ecPolicy, final boolean logRetryCache) throws IOException,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);

@@ -7599,7 +7598,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create erasure coding zone on " + srcArg);
       resultingStat = FSDirErasureCodingOp.createErasureCodingZone(this,
-          srcArg, schema, cellSize, logRetryCache);
+          srcArg, ecPolicy, logRetryCache);
       success = true;
     } finally {
       writeUnlock();

@@ -7627,30 +7626,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }

   /**
-   * Get available erasure coding schemas
+   * Get available erasure coding policies
   */
-  ECSchema[] getErasureCodingSchemas() throws IOException {
+  ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return FSDirErasureCodingOp.getErasureCodingSchemas(this);
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Get the ECSchema specified by the name
-   */
-  ECSchema getErasureCodingSchema(String schemaName) throws IOException {
-    checkOperation(OperationCategory.READ);
-    waitForLoadingFSImage();
-    readLock();
-    try {
-      checkOperation(OperationCategory.READ);
-      return FSDirErasureCodingOp.getErasureCodingSchema(this, schemaName);
+      return FSDirErasureCodingOp.getErasureCodingPolicies(this);
     } finally {
       readUnlock();
     }
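The quota adjustment in the hunk at line 3679 above compares a fully reserved block group against what the committed block actually consumes, and releases the difference. A worked sketch of that arithmetic with the default RS-6-3 layout; all the concrete numbers below are illustrative, not taken from the commit:

public class BlockGroupSpace {
  public static void main(String[] args) {
    // Default RS-6-3 policy: 6 data units + 3 parity units per block group.
    final int numDataUnits = 6;
    final int numParityUnits = 3;
    final long preferredBlockSize = 128L * 1024 * 1024; // 128 MB per internal block

    // Space reserved up front for a full block group (data + parity blocks).
    long numBlocks = numDataUnits + numParityUnits;
    long fullBlockGroupSize = preferredBlockSize * numBlocks; // 1152 MB

    // Suppose the committed group holds 300 MB of data; with full stripes the
    // parity overhead is proportional (150 MB), so ~450 MB is actually used.
    long actualBlockGroupSize = 450L * 1024 * 1024;
    long diff = fullBlockGroupSize - actualBlockGroupSize;
    System.out.println("released from quota: " + diff + " bytes"); // ~702 MB
  }
}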
@@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

@@ -143,7 +144,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RetryCache;

@@ -1823,7 +1823,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override // ClientProtocol
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkNNStartup();
     final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);

@@ -1832,8 +1832,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     }
     boolean success = false;
     try {
-      namesystem.createErasureCodingZone(src, schema, cellSize,
-          cacheEntry != null);
+      namesystem.createErasureCodingZone(src, ecPolicy, cacheEntry != null);
       success = true;
     } finally {
       RetryCache.setState(cacheEntry, success);

@@ -2035,9 +2034,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override // ClientProtocol
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkNNStartup();
-    return namesystem.getErasureCodingSchemas();
+    return namesystem.getErasureCodingPolicies();
   }

   @Override // ClientProtocol
@@ -572,7 +572,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {

     // count expected replicas
     short targetFileReplication;
-    if (file.getECSchema() != null) {
+    if (file.getErasureCodingPolicy() != null) {
       assert storedBlock instanceof BlockInfoStriped;
       targetFileReplication = ((BlockInfoStriped) storedBlock)
           .getRealTotalBlockNum();

@@ -1158,11 +1158,11 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {

   @VisibleForTesting
   static class ErasureCodingResult extends Result {
-    final String defaultSchema;
+    final String defaultECPolicy;

     ErasureCodingResult(Configuration conf) {
-      defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema()
-          .getSchemaName();
+      defaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy()
+          .getName();
     }

     @Override

@@ -1239,7 +1239,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
           ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks))
           .append(" %)");
     }
-    res.append("\n Default schema:\t\t").append(defaultSchema)
+    res.append("\n Default ecPolicy:\t\t").append(defaultECPolicy)
        .append("\n Average block group size:\t").append(
        getReplicationFactor()).append("\n Missing block groups:\t\t").append(
        missingIds.size()).append("\n Corrupt block groups:\t\t").append(
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import java.util.Arrays;
 import java.util.Collection;

@@ -77,31 +77,28 @@ public class BlockECRecoveryCommand extends DatanodeCommand {
     private String[] targetStorageIDs;
     private StorageType[] targetStorageTypes;
     private final short[] liveBlockIndices;
-    private final ECSchema ecSchema;
-    private final int cellSize;
+    private final ErasureCodingPolicy ecPolicy;

     public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
         DatanodeStorageInfo[] targetDnStorageInfo, short[] liveBlockIndices,
-        ECSchema ecSchema, int cellSize) {
+        ErasureCodingPolicy ecPolicy) {
       this(block, sources, DatanodeStorageInfo
           .toDatanodeInfos(targetDnStorageInfo), DatanodeStorageInfo
          .toStorageIDs(targetDnStorageInfo), DatanodeStorageInfo
-          .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecSchema,
-          cellSize);
+          .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecPolicy);
     }

     public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
         DatanodeInfo[] targets, String[] targetStorageIDs,
         StorageType[] targetStorageTypes, short[] liveBlockIndices,
-        ECSchema ecSchema, int cellSize) {
+        ErasureCodingPolicy ecPolicy) {
       this.block = block;
       this.sources = sources;
       this.targets = targets;
       this.targetStorageIDs = targetStorageIDs;
       this.targetStorageTypes = targetStorageTypes;
       this.liveBlockIndices = liveBlockIndices;
-      this.ecSchema = ecSchema;
-      this.cellSize = cellSize;
+      this.ecPolicy = ecPolicy;
     }

     public ExtendedBlock getExtendedBlock() {

@@ -128,12 +125,8 @@ public class BlockECRecoveryCommand extends DatanodeCommand {
       return liveBlockIndices;
     }

-    public ECSchema getECSchema() {
-      return ecSchema;
-    }
-
-    public int getCellSize() {
-      return cellSize;
+    public ErasureCodingPolicy getErasureCodingPolicy() {
+      return ecPolicy;
     }

     @Override
@@ -31,9 +31,8 @@ import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.util.StringUtils;

/**

@@ -49,7 +48,7 @@ public abstract class ECCommand extends Command {
factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
factory.addClass(GetECZoneCommand.class, "-"
+ GetECZoneCommand.NAME);
factory.addClass(ListECSchemas.class, "-" + ListECSchemas.NAME);
factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME);
}

@Override

@@ -77,35 +76,24 @@ public abstract class ECCommand extends Command {
}

/**
* Create EC encoding zone command. Zones are created to use specific EC
* encoding schema, other than default while encoding the files under some
* specific directory.
* A command to create an EC zone for a path, with an erasure coding policy name.
*/
static class CreateECZoneCommand extends ECCommand {
public static final String NAME = "createZone";
public static final String USAGE = "[-s <schemaName>] [-c <cellSize>] <path>";
public static final String USAGE = "[-s <policyName>] <path>";
public static final String DESCRIPTION =
"Create a zone to encode files using a specified schema\n"
"Create a zone to encode files using a specified policy\n"
+ "Options :\n"
+ " -s <schemaName> : EC schema name to encode files. "
+ "If not passed default schema will be used\n"
+ " -c <cellSize> : cell size to use for striped encoding files."
+ " If not passed default cellsize of "
+ HdfsConstants.BLOCK_STRIPED_CELL_SIZE + " will be used\n"
+ " -s <policyName> : erasure coding policy name to encode files. "
+ "If not passed the default policy will be used\n"
+ " <path> : Path to an empty directory. Under this directory "
+ "files will be encoded using specified schema";
private String schemaName;
private int cellSize = 0;
private ECSchema schema = null;
+ "files will be encoded using specified erasure coding policy";
private String ecPolicyName;
private ErasureCodingPolicy ecPolicy = null;

@Override
protected void processOptions(LinkedList<String> args) throws IOException {
schemaName = StringUtils.popOptionWithArgument("-s", args);
String cellSizeStr = StringUtils.popOptionWithArgument("-c", args);
if (cellSizeStr != null) {
cellSize = (int) StringUtils.TraditionalBinaryPrefix
.string2long(cellSizeStr);
}
ecPolicyName = StringUtils.popOptionWithArgument("-s", args);
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}

@@ -119,29 +107,29 @@ public abstract class ECCommand extends Command {
super.processPath(item);
DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
try {
if (schemaName != null) {
ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
for (ECSchema ecSchema : ecSchemas) {
if (schemaName.equals(ecSchema.getSchemaName())) {
schema = ecSchema;
if (ecPolicyName != null) {
ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
for (ErasureCodingPolicy ecPolicy : ecPolicies) {
if (ecPolicyName.equals(ecPolicy.getName())) {
this.ecPolicy = ecPolicy;
break;
}
}
if (schema == null) {
if (ecPolicy == null) {
StringBuilder sb = new StringBuilder();
sb.append("Schema '");
sb.append(schemaName);
sb.append("' does not match any of the supported schemas.");
sb.append("Policy '");
sb.append(ecPolicyName);
sb.append("' does not match any of the supported policies.");
sb.append(" Please select any one of ");
List<String> schemaNames = new ArrayList<String>();
for (ECSchema ecSchema : ecSchemas) {
schemaNames.add(ecSchema.getSchemaName());
List<String> ecPolicyNames = new ArrayList<String>();
for (ErasureCodingPolicy ecPolicy : ecPolicies) {
ecPolicyNames.add(ecPolicy.getName());
}
sb.append(schemaNames);
sb.append(ecPolicyNames);
throw new HadoopIllegalArgumentException(sb.toString());
}
}
dfs.createErasureCodingZone(item.path, schema, cellSize);
dfs.createErasureCodingZone(item.path, ecPolicy);
out.println("EC Zone created successfully at " + item.path);
} catch (IOException e) {
throw new IOException("Unable to create EC zone for the path "

@@ -188,13 +176,13 @@ public abstract class ECCommand extends Command {
}

/**
* List all supported EC Schemas
* List all supported erasure coding policies
*/
static class ListECSchemas extends ECCommand {
public static final String NAME = "listSchemas";
static class ListPolicies extends ECCommand {
public static final String NAME = "listPolicies";
public static final String USAGE = "";
public static final String DESCRIPTION =
"Get the list of ECSchemas supported\n";
"Get the list of erasure coding policies supported\n";

@Override
protected void processOptions(LinkedList<String> args) throws IOException {

@@ -209,14 +197,14 @@ public abstract class ECCommand extends Command {
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;

ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
StringBuilder sb = new StringBuilder();
int i = 0;
while (i < ecSchemas.length) {
ECSchema ecSchema = ecSchemas[i];
sb.append(ecSchema.getSchemaName());
while (i < ecPolicies.length) {
ErasureCodingPolicy ecPolicy = ecPolicies[i];
sb.append(ecPolicy.getName());
i++;
if (i < ecSchemas.length) {
if (i < ecPolicies.length) {
sb.append(", ");
}
}
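Taken together, the reworked command resolves the user-supplied policy name against the list the NameNode supports and hands the whole policy (or null, for the system default) to the client. A minimal standalone sketch of that flow, using only the client calls visible in the diff; the helper and its parameters are illustrative:

  import java.io.IOException;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

  class CreateZoneSketch {
    static void createZone(DistributedFileSystem dfs, Path dir,
        String policyName) throws IOException {
      ErasureCodingPolicy chosen = null;
      // Resolve the name against the policies the NameNode advertises.
      for (ErasureCodingPolicy p : dfs.getClient().getErasureCodingPolicies()) {
        if (p.getName().equals(policyName)) {
          chosen = p;
          break;
        }
      }
      // Passing null lets the NameNode fall back to the default policy.
      dfs.createErasureCodingZone(dir, chosen);
    }
  }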
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.security.token.Token;


@@ -318,7 +318,7 @@ public class StripedBlockUtil {
}

/**
* Decode based on the given input buffers and schema.
* Decode based on the given input buffers and erasure coding policy.
*/
public static void decodeAndFillBuffer(final byte[][] decodeInputs,
AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,

@@ -355,20 +355,20 @@ public class StripedBlockUtil {
* by stateful read and uses ByteBuffer as reading target buffer. Besides the
* read range is within a single stripe thus the calculation logic is simpler.
*/
public static AlignedStripe[] divideOneStripe(ECSchema ecSchema,
public static AlignedStripe[] divideOneStripe(ErasureCodingPolicy ecPolicy,
int cellSize, LocatedStripedBlock blockGroup, long rangeStartInBlockGroup,
long rangeEndInBlockGroup, ByteBuffer buf) {
final int dataBlkNum = ecSchema.getNumDataUnits();
final int dataBlkNum = ecPolicy.getNumDataUnits();
// Step 1: map the byte range to StripingCells
StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize,
StripingCell[] cells = getStripingCellsOfByteRange(ecPolicy, cellSize,
blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup);

// Step 2: get the unmerged ranges on each internal block
VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize,
VerticalRange[] ranges = getRangesForInternalBlocks(ecPolicy, cellSize,
cells);

// Step 3: merge into stripes
AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges);
AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);

// Step 4: calculate each chunk's position in destination buffer. Since the
// whole read range is within a single stripe, the logic is simpler here.

@@ -400,7 +400,7 @@ public class StripedBlockUtil {
/**
* This method divides a requested byte range into an array of inclusive
* {@link AlignedStripe}.
* @param ecSchema The codec schema for the file, which carries the numbers
* @param ecPolicy The codec policy for the file, which carries the numbers
* of data / parity blocks
* @param cellSize Cell size of stripe
* @param blockGroup The striped block group

@@ -412,24 +412,24 @@ public class StripedBlockUtil {
* At most 5 stripes will be generated from each logical range, as
* demonstrated in the header of {@link AlignedStripe}.
*/
public static AlignedStripe[] divideByteRangeIntoStripes(ECSchema ecSchema,
public static AlignedStripe[] divideByteRangeIntoStripes(ErasureCodingPolicy ecPolicy,
int cellSize, LocatedStripedBlock blockGroup,
long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf,
int offsetInBuf) {

// Step 0: analyze range and calculate basic parameters
final int dataBlkNum = ecSchema.getNumDataUnits();
final int dataBlkNum = ecPolicy.getNumDataUnits();

// Step 1: map the byte range to StripingCells
StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize,
StripingCell[] cells = getStripingCellsOfByteRange(ecPolicy, cellSize,
blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup);

// Step 2: get the unmerged ranges on each internal block
VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize,
VerticalRange[] ranges = getRangesForInternalBlocks(ecPolicy, cellSize,
cells);

// Step 3: merge into at most 5 stripes
AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges);
AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);

// Step 4: calculate each chunk's position in destination buffer
calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf, offsetInBuf);

@@ -446,7 +446,7 @@ public class StripedBlockUtil {
* used by {@link DFSStripedOutputStream} in encoding
*/
@VisibleForTesting
private static StripingCell[] getStripingCellsOfByteRange(ECSchema ecSchema,
private static StripingCell[] getStripingCellsOfByteRange(ErasureCodingPolicy ecPolicy,
int cellSize, LocatedStripedBlock blockGroup,
long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
Preconditions.checkArgument(

@@ -461,16 +461,16 @@ public class StripedBlockUtil {
final int firstCellOffset = (int) (rangeStartInBlockGroup % cellSize);
final int firstCellSize =
(int) Math.min(cellSize - (rangeStartInBlockGroup % cellSize), len);
cells[0] = new StripingCell(ecSchema, firstCellSize, firstCellIdxInBG,
cells[0] = new StripingCell(ecPolicy, firstCellSize, firstCellIdxInBG,
firstCellOffset);
if (lastCellIdxInBG != firstCellIdxInBG) {
final int lastCellSize = (int) (rangeEndInBlockGroup % cellSize) + 1;
cells[numCells - 1] = new StripingCell(ecSchema, lastCellSize,
cells[numCells - 1] = new StripingCell(ecPolicy, lastCellSize,
lastCellIdxInBG, 0);
}

for (int i = 1; i < numCells - 1; i++) {
cells[i] = new StripingCell(ecSchema, cellSize, i + firstCellIdxInBG, 0);
cells[i] = new StripingCell(ecPolicy, cellSize, i + firstCellIdxInBG, 0);
}

return cells;

@@ -481,10 +481,10 @@ public class StripedBlockUtil {
* the physical byte range (inclusive) on each stored internal block.
*/
@VisibleForTesting
private static VerticalRange[] getRangesForInternalBlocks(ECSchema ecSchema,
private static VerticalRange[] getRangesForInternalBlocks(ErasureCodingPolicy ecPolicy,
int cellSize, StripingCell[] cells) {
int dataBlkNum = ecSchema.getNumDataUnits();
int parityBlkNum = ecSchema.getNumParityUnits();
int dataBlkNum = ecPolicy.getNumDataUnits();
int parityBlkNum = ecPolicy.getNumParityUnits();

VerticalRange ranges[] = new VerticalRange[dataBlkNum + parityBlkNum];


@@ -521,9 +521,9 @@ public class StripedBlockUtil {
* {@link AlignedStripe} instances.
*/
private static AlignedStripe[] mergeRangesForInternalBlocks(
ECSchema ecSchema, VerticalRange[] ranges) {
int dataBlkNum = ecSchema.getNumDataUnits();
int parityBlkNum = ecSchema.getNumParityUnits();
ErasureCodingPolicy ecPolicy, VerticalRange[] ranges) {
int dataBlkNum = ecPolicy.getNumDataUnits();
int parityBlkNum = ecPolicy.getNumParityUnits();
List<AlignedStripe> stripes = new ArrayList<>();
SortedSet<Long> stripePoints = new TreeSet<>();
for (VerticalRange r : ranges) {

@@ -628,7 +628,7 @@ public class StripedBlockUtil {
*/
@VisibleForTesting
static class StripingCell {
final ECSchema schema;
final ErasureCodingPolicy ecPolicy;
/** Logical order in a block group, used when doing I/O to a block group */
final int idxInBlkGroup;
final int idxInInternalBlk;

@@ -642,13 +642,13 @@ public class StripedBlockUtil {
final int offset;
final int size;

StripingCell(ECSchema ecSchema, int cellSize, int idxInBlkGroup,
StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, int idxInBlkGroup,
int offset) {
this.schema = ecSchema;
this.ecPolicy = ecPolicy;
this.idxInBlkGroup = idxInBlkGroup;
this.idxInInternalBlk = idxInBlkGroup / ecSchema.getNumDataUnits();
this.idxInInternalBlk = idxInBlkGroup / ecPolicy.getNumDataUnits();
this.idxInStripe = idxInBlkGroup -
this.idxInInternalBlk * ecSchema.getNumDataUnits();
this.idxInInternalBlk * ecPolicy.getNumDataUnits();
this.offset = offset;
this.size = cellSize;
}
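The cell-to-block mapping in StripingCell is plain integer arithmetic. A small self-contained illustration of the same formulas, assuming the default 6-data/3-parity layout with 64 KB cells (all values illustrative, only the arithmetic mirrors the constructor above):

  public class StripingCellMathDemo {
    public static void main(String[] args) {
      final int dataBlkNum = 6;
      final int cellSize = 64 * 1024;
      final long rangeStartInBlockGroup = 500_000; // arbitrary read offset
      // Which logical cell of the block group does the offset fall into?
      final int cellIdxInBG = (int) (rangeStartInBlockGroup / cellSize);
      // Same formulas as the StripingCell constructor:
      final int idxInInternalBlk = cellIdxInBG / dataBlkNum;             // stripe (row)
      final int idxInStripe = cellIdxInBG - idxInInternalBlk * dataBlkNum; // block (column)
      // 500000 / 65536 = cell 7, so: stripe 1 of internal block 1.
      System.out.println("cell " + cellIdxInBG + " -> internal block "
          + idxInStripe + ", stripe " + idxInInternalBlk);
    }
  }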
@@ -863,8 +863,8 @@ service ClientNamenodeProtocol {
returns(GetCurrentEditLogTxidResponseProto);
rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
returns(GetEditsFromTxidResponseProto);
rpc getECSchemas(GetECSchemasRequestProto)
returns(GetECSchemasResponseProto);
rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto)
returns(GetErasureCodingPoliciesResponseProto);
rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
returns(GetErasureCodingZoneResponseProto);
}


@@ -28,24 +28,22 @@ import "hdfs.proto";
*/
message ErasureCodingZoneProto {
required string dir = 1;
required ECSchemaProto schema = 2;
required uint32 cellSize = 3;
required ErasureCodingPolicyProto ecPolicy = 2;
}

message CreateErasureCodingZoneRequestProto {
required string src = 1;
optional ECSchemaProto schema = 2;
optional uint32 cellSize = 3;
optional ErasureCodingPolicyProto ecPolicy = 2;
}

message CreateErasureCodingZoneResponseProto {
}

message GetECSchemasRequestProto { // void request
message GetErasureCodingPoliciesRequestProto { // void request
}

message GetECSchemasResponseProto {
repeated ECSchemaProto schemas = 1;
message GetErasureCodingPoliciesResponseProto {
repeated ErasureCodingPolicyProto ecPolicies = 1;
}

message GetErasureCodingZoneRequestProto {

@@ -66,6 +64,5 @@ message BlockECRecoveryInfoProto {
required StorageUuidsProto targetStorageUuids = 4;
required StorageTypesProto targetStorageTypes = 5;
repeated uint32 liveBlockIndices = 6;
required ECSchemaProto ecSchema = 7;
required uint32 cellSize = 8;
}
required ErasureCodingPolicyProto ecPolicy = 7;
}


@@ -141,7 +141,6 @@ message INodeSection {
optional XAttrFeatureProto xAttrs = 9;
optional uint32 storagePolicyID = 10;
optional bool isStriped = 11;
optional uint64 stripingCellSize = 12;
}

message QuotaByStorageTypeEntryProto {


@@ -306,8 +306,7 @@ message LocatedBlocksProto {
optional FileEncryptionInfoProto fileEncryptionInfo = 6;

// Optional field for erasure coding
optional ECSchemaProto eCSchema = 7;
optional uint32 stripeCellSize = 8;
optional ErasureCodingPolicyProto ecPolicy = 7;
}

/**

@@ -322,11 +321,16 @@ message ECSchemaOptionEntryProto {
* ECSchema for erasurecoding
*/
message ECSchemaProto {
required string schemaName = 1;
required string codecName = 2;
required uint32 dataUnits = 3;
required uint32 parityUnits = 4;
repeated ECSchemaOptionEntryProto options = 5;
required string codecName = 1;
required uint32 dataUnits = 2;
required uint32 parityUnits = 3;
repeated ECSchemaOptionEntryProto options = 4;
}

message ErasureCodingPolicyProto {
required string name = 1;
required ECSchemaProto schema = 2;
required uint32 cellSize = 3;
}

/**

@@ -365,8 +369,7 @@ message HdfsFileStatusProto {
optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id

// Optional field for erasure coding
optional ECSchemaProto ecSchema = 17;
optional uint32 stripeCellSize = 18;
optional ErasureCodingPolicyProto ecPolicy = 17;
}

/**
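The new wire layout nests the now-nameless schema inside a named policy. A hedged sketch of the generated builder usage (assuming the usual HdfsProtos outer class produced by protoc for hdfs.proto; the codec and policy name values are illustrative):

  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;

  class EcPolicyProtoSketch {
    static ErasureCodingPolicyProto buildDefault() {
      ECSchemaProto schema = ECSchemaProto.newBuilder()
          .setCodecName("rs")     // illustrative codec name
          .setDataUnits(6)
          .setParityUnits(3)
          .build();
      // The name and the cell size move out of the per-message fields
      // and into the policy that wraps the schema.
      return ErasureCodingPolicyProto.newBuilder()
          .setName("RS-6-3-64k")  // illustrative policy name
          .setSchema(schema)
          .setCellSize(64 * 1024)
          .build();
    }
  }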
@@ -1,3 +1,4 @@

/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file


@@ -1875,7 +1875,7 @@ public class DFSTestUtil {
assert dir != null;
dfs.mkdirs(dir);
try {
dfs.getClient().createErasureCodingZone(dir.toString(), null, 0);
dfs.getClient().createErasureCodingZone(dir.toString(), null);
} catch (IOException e) {
if (!e.getMessage().contains("non-empty directory")) {
throw e;


@@ -255,12 +255,12 @@ public class TestDFSClientRetries {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null, 0)).when(mockNN).getFileInfo(anyString());
1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());

Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null, 0))
1010, 0, null, (byte) 0, null))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),

@@ -496,7 +496,7 @@ public class TestDFSClientRetries {
badBlocks.add(badLocatedBlock);
return new LocatedBlocks(goodBlockList.getFileLength(), false,
badBlocks, null, true,
null, null, 0);
null, null);
}
}


@@ -35,10 +35,10 @@ import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.junit.After;
import org.junit.Assert;

@@ -59,7 +59,7 @@ public class TestDFSStripedInputStream {
private DistributedFileSystem fs;
private final Path dirPath = new Path("/striped");
private Path filePath = new Path(dirPath, "file");
private final ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
private final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final short DATA_BLK_NUM = HdfsConstants.NUM_DATA_BLOCKS;
private final short PARITY_BLK_NUM = HdfsConstants.NUM_PARITY_BLOCKS;
private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

@@ -79,7 +79,7 @@ public class TestDFSStripedInputStream {
}
fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
fs.getClient().createErasureCodingZone(dirPath.toString(), null);
}

@After

@@ -100,7 +100,7 @@ public class TestDFSStripedInputStream {
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
filePath.toString(), false, schema, CELLSIZE, null);
filePath.toString(), false, ecPolicy, null);

List<LocatedBlock> lbList = lbs.getLocatedBlocks();
for (LocatedBlock aLbList : lbList) {

@@ -152,7 +152,7 @@ public class TestDFSStripedInputStream {
}
}
DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
filePath.toString(), false, schema, CELLSIZE, null);
filePath.toString(), false, ecPolicy, null);

int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,

@@ -194,7 +194,7 @@ public class TestDFSStripedInputStream {
}
DFSStripedInputStream in =
new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE, null);
ErasureCodingPolicyManager.getSystemDefaultPolicy(), null);
int readSize = BLOCK_GROUP_SIZE;
byte[] readBuffer = new byte[readSize];
byte[] expected = new byte[readSize];

@@ -292,7 +292,7 @@ public class TestDFSStripedInputStream {

DFSStripedInputStream in =
new DFSStripedInputStream(fs.getClient(), filePath.toString(),
false, schema, CELLSIZE, null);
false, ecPolicy, null);

byte[] expected = new byte[fileSize];
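As these tests show, the striped input stream is now constructed from the policy alone; the cell size travels inside it. A hedged sketch of a read path built on the constructor visible above (buffer size and helper name are illustrative; like the tests, it assumes access to the org.apache.hadoop.hdfs package):

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DFSStripedInputStream;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
  import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

  class StripedReadSketch {
    static byte[] readFirstCell(DistributedFileSystem fs, Path file)
        throws Exception {
      ErasureCodingPolicy policy =
          ErasureCodingPolicyManager.getSystemDefaultPolicy();
      // The old (schema, cellSize) constructor arguments collapse into policy.
      try (DFSStripedInputStream in = new DFSStripedInputStream(
          fs.getClient(), file.toString(), false, policy, null)) {
        byte[] buf = new byte[policy.getCellSize()];
        in.read(buf, 0, buf.length);
        return buf;
      }
    }
  }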
@@ -68,7 +68,7 @@ public class TestDFSStripedOutputStream {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}


@@ -79,7 +79,7 @@ public class TestDFSStripedOutputStreamWithFailure {
cluster.waitActive();
dfs = cluster.getFileSystem();
dfs.mkdirs(dir);
dfs.createErasureCodingZone(dir, null, 0);
dfs.createErasureCodingZone(dir, null);
}

private void tearDown() {


@@ -110,7 +110,7 @@ public class TestDFSUtil {
l2.setCorrupt(true);

List<LocatedBlock> ls = Arrays.asList(l1, l2);
LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null, 0);
LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null);

BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);


@@ -81,7 +81,7 @@ public class TestDatanodeConfig {
public void testDataDirectories() throws IOException {
File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
Configuration conf = cluster.getConfiguration(0);
// 1. Test unsupported schema. Only "file:" is supported.
// 1. Test unsupported ecPolicy. Only "file:" is supported.
String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
DataNode dn = null;

@@ -97,7 +97,7 @@ public class TestDatanodeConfig {
}
assertNull("Data-node startup should have failed.", dn);

// 2. Test "file:" schema and no schema (path-only). Both should work.
// 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
String dnDir1 = fileAsURI(dataDir).toString() + "1";
String dnDir2 = makeURI("file", "localhost",
fileAsURI(dataDir).getPath() + "2");


@@ -1,54 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;

import static org.junit.Assert.*;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestECSchemas {
private MiniDFSCluster cluster;

@Before
public void before() throws IOException {
cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
.build();
cluster.waitActive();
}

@After
public void after() {
if (cluster != null) {
cluster.shutdown();
}
}

@Test
public void testGetECSchemas() throws Exception {
ECSchema[] ecSchemas = cluster.getFileSystem().getClient().getECSchemas();
assertNotNull(ecSchemas);
assertTrue("Should have at least one schema", ecSchemas.length > 0);
}
}

@@ -737,7 +737,7 @@ public class TestEncryptionZones {
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
(byte) 0, null, 0))
(byte) 0, null))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),


@@ -22,10 +22,10 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

@@ -65,7 +65,7 @@ public class TestErasureCodingZones {
fs.mkdir(testDir, FsPermission.getDirDefault());

/* Normal creation of an erasure coding zone */
fs.getClient().createErasureCodingZone(testDir.toString(), null, 0);
fs.getClient().createErasureCodingZone(testDir.toString(), null);

/* Verify files under the zone are striped */
final Path ECFilePath = new Path(testDir, "foo");

@@ -78,7 +78,7 @@ public class TestErasureCodingZones {
fs.mkdir(notEmpty, FsPermission.getDirDefault());
fs.create(new Path(notEmpty, "foo"));
try {
fs.getClient().createErasureCodingZone(notEmpty.toString(), null, 0);
fs.getClient().createErasureCodingZone(notEmpty.toString(), null);
fail("Erasure coding zone on non-empty dir");
} catch (IOException e) {
assertExceptionContains("erasure coding zone for a non-empty directory", e);

@@ -88,10 +88,10 @@ public class TestErasureCodingZones {
final Path zone1 = new Path("/zone1");
final Path zone2 = new Path(zone1, "zone2");
fs.mkdir(zone1, FsPermission.getDirDefault());
fs.getClient().createErasureCodingZone(zone1.toString(), null, 0);
fs.getClient().createErasureCodingZone(zone1.toString(), null);
fs.mkdir(zone2, FsPermission.getDirDefault());
try {
fs.getClient().createErasureCodingZone(zone2.toString(), null, 0);
fs.getClient().createErasureCodingZone(zone2.toString(), null);
fail("Nested erasure coding zones");
} catch (IOException e) {
assertExceptionContains("already in an erasure coding zone", e);

@@ -101,7 +101,7 @@ public class TestErasureCodingZones {
final Path fPath = new Path("/file");
fs.create(fPath);
try {
fs.getClient().createErasureCodingZone(fPath.toString(), null, 0);
fs.getClient().createErasureCodingZone(fPath.toString(), null);
fail("Erasure coding zone on file");
} catch (IOException e) {
assertExceptionContains("erasure coding zone for a file", e);

@@ -114,8 +114,8 @@ public class TestErasureCodingZones {
final Path dstECDir = new Path("/dstEC");
fs.mkdir(srcECDir, FsPermission.getDirDefault());
fs.mkdir(dstECDir, FsPermission.getDirDefault());
fs.getClient().createErasureCodingZone(srcECDir.toString(), null, 0);
fs.getClient().createErasureCodingZone(dstECDir.toString(), null, 0);
fs.getClient().createErasureCodingZone(srcECDir.toString(), null);
fs.getClient().createErasureCodingZone(dstECDir.toString(), null);
final Path srcFile = new Path(srcECDir, "foo");
fs.create(srcFile);


@@ -160,7 +160,7 @@ public class TestErasureCodingZones {
public void testReplication() throws IOException {
final Path testDir = new Path("/ec");
fs.mkdir(testDir, FsPermission.getDirDefault());
fs.createErasureCodingZone(testDir, null, 0);
fs.createErasureCodingZone(testDir, null);
final Path fooFile = new Path(testDir, "foo");
// create ec file with replication=0
fs.create(fooFile, FsPermission.getFileDefault(), true,

@@ -177,47 +177,47 @@ public class TestErasureCodingZones {
}

@Test
public void testGetErasureCodingInfoWithSystemDefaultSchema() throws Exception {
public void testGetErasureCodingInfoWithSystemDefaultECPolicy() throws Exception {
String src = "/ec";
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
assertNull(fs.getClient().getFileInfo(src).getECSchema());
assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, null, 0); //Default one will be used.
ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
verifyErasureCodingInfo(src, sysDefaultSchema);
fs.getClient().createErasureCodingZone(src, null); //Default one will be used.
ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
verifyErasureCodingInfo(src, sysDefaultECPolicy);
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
verifyErasureCodingInfo(src + "/child1", sysDefaultSchema);
verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy);
}

@Test
public void testGetErasureCodingInfo() throws Exception {
ECSchema[] sysSchemas = ErasureCodingSchemaManager.getSystemSchemas();
assertTrue("System schemas should be of only 1 for now",
sysSchemas.length == 1);
ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices();
assertTrue("System ecPolicies should be of only 1 for now",
sysECPolicies.length == 1);

ECSchema usingSchema = sysSchemas[0];
ErasureCodingPolicy usingECPolicy = sysECPolicies[0];
String src = "/ec2";
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
assertNull(fs.getClient().getFileInfo(src).getECSchema());
assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, usingSchema, 0);
verifyErasureCodingInfo(src, usingSchema);
fs.getClient().createErasureCodingZone(src, usingECPolicy);
verifyErasureCodingInfo(src, usingECPolicy);
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
verifyErasureCodingInfo(src + "/child1", usingSchema);
verifyErasureCodingInfo(src + "/child1", usingECPolicy);
}

private void verifyErasureCodingInfo(
String src, ECSchema usingSchema) throws IOException {
String src, ErasureCodingPolicy usingECPolicy) throws IOException {
HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
ECSchema schema = hdfsFileStatus.getECSchema();
assertNotNull(schema);
assertEquals("Actually used schema should be equal with target schema",
usingSchema, schema);
ErasureCodingPolicy ecPolicy = hdfsFileStatus.getErasureCodingPolicy();
assertNotNull(ecPolicy);
assertEquals("Actually used ecPolicy should be equal with target ecPolicy",
usingECPolicy, ecPolicy);
}
}


@@ -9,13 +9,13 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestFileStatusWithECschema {
public class TestFileStatusWithECPolicy {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;

@@ -37,29 +37,29 @@ public class TestFileStatusWithECschema {
}

@Test
public void testFileStatusWithECschema() throws Exception {
public void testFileStatusWithECPolicy() throws Exception {
// test directory not in EC zone
final Path dir = new Path("/foo");
assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
assertNull(client.getFileInfo(dir.toString()).getECSchema());
assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
// test file not in EC zone
final Path file = new Path(dir, "foo");
fs.create(file).close();
assertNull(client.getFileInfo(file.toString()).getECSchema());
assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
fs.delete(file, true);

final ECSchema schema1 = ErasureCodingSchemaManager.getSystemDefaultSchema();
final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
// create EC zone on dir
fs.createErasureCodingZone(dir, schema1, 0);
final ECSchema schame2 = client.getFileInfo(dir.toUri().getPath()).getECSchema();
assertNotNull(schame2);
assertTrue(schema1.equals(schame2));
fs.createErasureCodingZone(dir, ecPolicy1);
final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
assertNotNull(ecPolicy2);
assertTrue(ecPolicy1.equals(ecPolicy2));

// test file in EC zone
fs.create(file).close();
final ECSchema schame3 =
fs.getClient().getFileInfo(file.toUri().getPath()).getECSchema();
assertNotNull(schame3);
assertTrue(schema1.equals(schame3));
final ErasureCodingPolicy ecPolicy3 =
fs.getClient().getFileInfo(file.toUri().getPath()).getErasureCodingPolicy();
assertNotNull(ecPolicy3);
assertTrue(ecPolicy1.equals(ecPolicy3));
}
}

@@ -354,12 +354,12 @@ public class TestLease {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null, 0)).when(mcp).getFileInfo(anyString());
1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
Mockito
.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null, 0))
1010, 0, null, (byte) 0, null))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),


@@ -64,8 +64,7 @@ public class TestReadStripedFileWithDecoding {
public void setup() throws IOException {
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
.numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/",
null, cellSize);
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}


@@ -52,8 +52,7 @@ public class TestReadStripedFileWithMissingBlocks {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/",
null, cellSize);
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}


@@ -78,7 +78,7 @@ public class TestRecoverStripedFile {
cluster.waitActive();

fs = cluster.getFileSystem();
fs.getClient().createErasureCodingZone("/", null, 0);
fs.getClient().createErasureCodingZone("/", null);

List<DataNode> datanodes = cluster.getDataNodes();
for (int i = 0; i < dnNum; i++) {


@@ -54,8 +54,7 @@ public class TestSafeModeWithStripedFile {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/",
null, cellSize);
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.waitActive();

}


@@ -57,8 +57,7 @@ public class TestWriteReadStripedFile {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/",
null, cellSize);
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}


@@ -48,8 +48,7 @@ public class TestWriteStripedFileWithFailure {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/",
null, cellSize);
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}


@@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;

@@ -88,7 +88,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;

@@ -682,8 +682,7 @@ public class TestPBHelper {
short[] liveBlkIndices0 = new short[2];
BlockECRecoveryInfo blkECRecoveryInfo0 = new BlockECRecoveryInfo(
new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
liveBlkIndices0, ErasureCodingSchemaManager.getSystemDefaultSchema(),
64 * 1024);
liveBlkIndices0, ErasureCodingPolicyManager.getSystemDefaultPolicy());
DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil

@@ -697,8 +696,7 @@ public class TestPBHelper {
short[] liveBlkIndices1 = new short[2];
BlockECRecoveryInfo blkECRecoveryInfo1 = new BlockECRecoveryInfo(
new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
liveBlkIndices1, ErasureCodingSchemaManager.getSystemDefaultSchema(),
64 * 1024);
liveBlkIndices1, ErasureCodingPolicyManager.getSystemDefaultPolicy());
List<BlockECRecoveryInfo> blkRecoveryInfosList = new ArrayList<BlockECRecoveryInfo>();
blkRecoveryInfosList.add(blkECRecoveryInfo0);
blkRecoveryInfosList.add(blkECRecoveryInfo1);

@@ -740,18 +738,18 @@ public class TestPBHelper {
assertEquals(liveBlockIndices1[i], liveBlockIndices2[i]);
}

ECSchema ecSchema1 = blkECRecoveryInfo1.getECSchema();
ECSchema ecSchema2 = blkECRecoveryInfo2.getECSchema();
// Compare ECSchemas same as default ECSchema as we used system default
// ECSchema used in this test
compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema1);
compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema2);
ErasureCodingPolicy ecPolicy1 = blkECRecoveryInfo1.getErasureCodingPolicy();
ErasureCodingPolicy ecPolicy2 = blkECRecoveryInfo2.getErasureCodingPolicy();
// Compare ECPolicies same as default ECPolicy as we used system default
// ECPolicy used in this test
compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy1);
compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy2);
}

private void compareECSchemas(ECSchema ecSchema1, ECSchema ecSchema2) {
assertEquals(ecSchema1.getSchemaName(), ecSchema2.getSchemaName());
assertEquals(ecSchema1.getNumDataUnits(), ecSchema2.getNumDataUnits());
assertEquals(ecSchema1.getNumParityUnits(), ecSchema2.getNumParityUnits());
private void compareECPolicies(ErasureCodingPolicy ecPolicy1, ErasureCodingPolicy ecPolicy2) {
assertEquals(ecPolicy1.getName(), ecPolicy2.getName());
assertEquals(ecPolicy1.getNumDataUnits(), ecPolicy2.getNumDataUnits());
assertEquals(ecPolicy1.getNumParityUnits(), ecPolicy2.getNumParityUnits());
}

private void assertDnInfosEqual(DatanodeInfo[] dnInfos1,


@@ -1503,7 +1503,7 @@ public class TestBalancer {
cluster.waitActive();
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
client.createErasureCodingZone("/", null, 0);
client.createErasureCodingZone("/", null);

long totalCapacity = sum(capacities);


@@ -19,10 +19,9 @@ package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;

@@ -45,11 +44,10 @@ public class TestBlockInfoStriped {
private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
private static final long BASE_ID = -1600;
private static final Block baseBlock = new Block(BASE_ID);
private static final ECSchema testSchema
= ErasureCodingSchemaManager.getSystemDefaultSchema();
private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
private static final ErasureCodingPolicy testECPolicy
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
testSchema, cellSize);
testECPolicy);

private Block[] createReportedBlocks(int num) {
Block[] blocks = new Block[num];

@@ -237,7 +235,7 @@ public class TestBlockInfoStriped {
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
DataOutput out = new DataOutputStream(byteStream);
BlockInfoStriped blk = new BlockInfoStriped(new Block(blkID, numBytes,
generationStamp), testSchema, cellSize);
generationStamp), testECPolicy);

try {
blk.write(out);


@@ -56,7 +56,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
conf = getConf();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient()
.createErasureCodingZone("/", null, cellSize);
.createErasureCodingZone("/", null);
try {
cluster.waitActive();
doTestRead(conf, cluster, true);


@@ -86,7 +86,7 @@ public class TestSequentialBlockGroupId {
.getBlockGroupIdGenerator();
fs.mkdirs(eczone);
cluster.getFileSystem().getClient()
.createErasureCodingZone("/eczone", null, cellSize);
.createErasureCodingZone("/eczone", null);
}

@After


@@ -19,9 +19,8 @@
package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

@@ -31,17 +30,15 @@ import static org.junit.Assert.fail;

public class TestUnderReplicatedBlockQueues {

private final ECSchema ecSchema =
ErasureCodingSchemaManager.getSystemDefaultSchema();
private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();

private BlockInfo genBlockInfo(long id) {
return new BlockInfoContiguous(new Block(id), (short) 3);
}

private BlockInfo genStripedBlockInfo(long id, long numBytes) {
BlockInfoStriped sblk = new BlockInfoStriped(new Block(id), ecSchema,
CELLSIZE);
BlockInfoStriped sblk = new BlockInfoStriped(new Block(id), ecPolicy);
sblk.setNumBytes(numBytes);
return sblk;
}

@@ -101,8 +98,8 @@ public class TestUnderReplicatedBlockQueues {

@Test
public void testStripedBlockPriorities() throws Throwable {
int dataBlkNum = ecSchema.getNumDataUnits();
int parityBlkNUm = ecSchema.getNumParityUnits();
int dataBlkNum = ecPolicy.getNumDataUnits();
int parityBlkNUm = ecPolicy.getNumParityUnits();
doTestStripedBlockPriorities(1, parityBlkNUm);
doTestStripedBlockPriorities(dataBlkNum, parityBlkNUm);
}

@@ -110,7 +107,7 @@ public class TestUnderReplicatedBlockQueues {
private void doTestStripedBlockPriorities(int dataBlkNum, int parityBlkNum)
throws Throwable {
int groupSize = dataBlkNum + parityBlkNum;
long numBytes = CELLSIZE * dataBlkNum;
long numBytes = ecPolicy.getCellSize() * dataBlkNum;
UnderReplicatedBlocks queues = new UnderReplicatedBlocks();

// add a striped block which has been left NUM_DATA_BLOCKS internal blocks
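The same collapse shows up in the block-management layer: BlockInfoStriped now derives data/parity counts and cell size from the policy. A minimal sketch of the pattern these tests follow (the block id is an illustrative placeholder):

  import org.apache.hadoop.hdfs.protocol.Block;
  import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
  import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

  class StripedBlockSketch {
    static BlockInfoStriped oneFullStripe(long blockId) {
      ErasureCodingPolicy ecPolicy =
          ErasureCodingPolicyManager.getSystemDefaultPolicy();
      BlockInfoStriped sblk = new BlockInfoStriped(new Block(blockId), ecPolicy);
      // One full stripe of user data spans one cell on each data block.
      sblk.setNumBytes((long) ecPolicy.getCellSize() * ecPolicy.getNumDataUnits());
      return sblk;
    }
  }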
@ -426,7 +426,7 @@ public class TestMover {
|
|||
client.setStoragePolicy(barDir,
|
||||
HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
|
||||
// set "/bar" directory with EC zone.
|
||||
client.createErasureCodingZone(barDir, null, 0);
|
||||
client.createErasureCodingZone(barDir, null);
|
||||
|
||||
// write file to barDir
|
||||
final String fooFile = "/bar/foo";
|
||||
|
|
|
@ -70,7 +70,7 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
cluster.waitActive();
|
||||
fs = cluster.getFileSystem();
|
||||
fs.mkdirs(dirPath);
|
||||
fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
|
||||
fs.getClient().createErasureCodingZone(dirPath.toString(), null);
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -180,7 +180,7 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
long groupId = bg.getBlock().getBlockId();
|
||||
Block blk = new Block(groupId, BLOCK_SIZE, gs);
|
||||
BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
|
||||
ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE);
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy());
|
||||
for (int i = 0; i < GROUP_SIZE; i++) {
|
||||
blk.setBlockId(groupId + i);
|
||||
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
|
||||
|
|
|
@ -75,7 +75,7 @@ public class TestAddStripedBlocks {
|
|||
.numDataNodes(GROUP_SIZE).build();
|
||||
cluster.waitActive();
|
||||
dfs = cluster.getFileSystem();
|
||||
dfs.getClient().createErasureCodingZone("/", null, 0);
|
||||
dfs.getClient().createErasureCodingZone("/", null);
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
|
|||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
|
@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.io.erasurecode.ECSchema;
|
||||
import org.apache.hadoop.test.PathUtils;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.Test;
|
||||
|
@ -76,8 +75,8 @@ public class TestFSEditLogLoader {
|
|||
|
||||
private static final int NUM_DATA_NODES = 0;
|
||||
|
||||
private static final ECSchema testSchema
|
||||
= ErasureCodingSchemaManager.getSystemDefaultSchema();
|
||||
private static final ErasureCodingPolicy testECPolicy
|
||||
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
|
||||
@Test
|
||||
public void testDisplayRecentEditLogOpCodes() throws IOException {
|
||||
|
@ -450,11 +449,10 @@ public class TestFSEditLogLoader {
|
|||
long timestamp = 1426222918;
|
||||
short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
|
||||
short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
|
||||
int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
|
||||
|
||||
//set the storage policy of the directory
|
||||
fs.mkdir(new Path(testDir), new FsPermission("755"));
|
||||
fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
|
||||
fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
|
||||
|
||||
// Create a file with striped block
|
||||
Path p = new Path(testFilePath);
|
||||
|
@ -466,7 +464,7 @@ public class TestFSEditLogLoader {
|
|||
|
||||
// Add a striped block to the file
|
||||
BlockInfoStriped stripedBlk = new BlockInfoStriped(
|
||||
new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
|
||||
new Block(blkId, blkNumBytes, timestamp), testECPolicy);
|
||||
INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
|
||||
file.toUnderConstruction(clientName, clientMachine);
|
||||
file.addBlock(stripedBlk);
|
||||
|
@ -491,7 +489,6 @@ public class TestFSEditLogLoader {
|
|||
assertEquals(timestamp, blks[0].getGenerationStamp());
|
||||
assertEquals(blockNum, ((BlockInfoStriped)blks[0]).getDataBlockNum());
|
||||
assertEquals(parityNum, ((BlockInfoStriped)blks[0]).getParityBlockNum());
|
||||
assertEquals(cellSize, ((BlockInfoStriped)blks[0]).getCellSize());
|
||||
|
||||
cluster.shutdown();
|
||||
cluster = null;
|
||||
|
@ -524,17 +521,16 @@ public class TestFSEditLogLoader {
|
|||
long timestamp = 1426222918;
|
||||
short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
|
||||
short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
|
||||
int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
|
||||
|
||||
//set the storage policy of the directory
|
||||
fs.mkdir(new Path(testDir), new FsPermission("755"));
|
||||
fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
|
||||
fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
|
||||
|
||||
//create a file with striped blocks
|
||||
Path p = new Path(testFilePath);
|
||||
DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
|
||||
BlockInfoStriped stripedBlk = new BlockInfoStriped(
|
||||
new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
|
||||
new Block(blkId, blkNumBytes, timestamp), testECPolicy);
|
||||
INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
|
||||
file.toUnderConstruction(clientName, clientMachine);
|
||||
file.addBlock(stripedBlk);
|
||||
|
@ -573,7 +569,6 @@ public class TestFSEditLogLoader {
|
|||
assertEquals(newTimestamp, blks[0].getGenerationStamp());
|
||||
assertEquals(blockNum, ((BlockInfoStriped)blks[0]).getDataBlockNum());
|
||||
assertEquals(parityNum, ((BlockInfoStriped)blks[0]).getParityBlockNum());
|
||||
assertEquals(cellSize, ((BlockInfoStriped)blks[0]).getCellSize());
|
||||
|
||||
cluster.shutdown();
|
||||
cluster = null;
|
||||
|
|
|
@@ -28,11 +28,11 @@ import java.io.IOException;
 import java.util.EnumSet;

 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.junit.Assert;

 import org.apache.hadoop.fs.permission.PermissionStatus;

@@ -68,9 +68,8 @@ public class TestFSImage {

   private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
       "image-with-zero-block-size.tar.gz";
-  private static final ECSchema testSchema
-      = ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private static final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();

   @Test
   public void testPersist() throws IOException {

@@ -141,7 +140,7 @@ public class TestFSImage {
   private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
                                                boolean isUC) throws IOException{
     // contruct a INode with StripedBlock for saving and loading
-    fsn.createErasureCodingZone("/", null, 0, false);
+    fsn.createErasureCodingZone("/", null, false);
     long id = 123456789;
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
     PermissionStatus permissionStatus = new PermissionStatus("testuser_a",

@@ -162,7 +161,7 @@ public class TestFSImage {
     for (int i = 0; i < stripedBlks.length; i++) {
       stripedBlks[i] = new BlockInfoStriped(
               new Block(stripedBlkId + i, preferredBlockSize, timestamp),
-              testSchema, cellSize);
+              testECPolicy);
       file.addBlock(stripedBlks[i]);
     }

@@ -386,7 +385,7 @@ public class TestFSImage {
         .build();
     cluster.waitActive();
     DistributedFileSystem fs = cluster.getFileSystem();
-    fs.getClient().getNamenode().createErasureCodingZone("/", null, 0);
+    fs.getClient().getNamenode().createErasureCodingZone("/", null);
     Path file = new Path("/striped");
     FSDataOutputStream out = fs.create(file);
     byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
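
The fixture swap above is the pattern repeated across these test classes: two fields (an ECSchema and an int cellSize) become one ErasureCodingPolicy. A sketch of what the replacement field exposes, assuming the accessors visible elsewhere in this diff (getNumDataUnits, getNumParityUnits) plus a getName() that backs the Name=... rendering in the CLI output further down:

```java
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

class PolicyFixtureSketch {
  private static final ErasureCodingPolicy testECPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();

  static String describe() {
    // One handle answers everything the two old fields covered; for the
    // default RS-6-3-64k policy this reports 6 data and 3 parity units.
    return testECPolicy.getName() + ": "
        + testECPolicy.getNumDataUnits() + " data + "
        + testECPolicy.getNumParityUnits() + " parity";
  }
}
```
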
@@ -1202,7 +1202,7 @@ public class TestFsck {

     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
         blockSize, modTime, accessTime, perms, owner, group, symlink,
-        path, fileId, numChildren, null, storagePolicy, null, 0);
+        path, fileId, numChildren, null, storagePolicy, null);
     Result replRes = new ReplicationResult(conf);
     Result ecRes = new ErasureCodingResult(conf);


@@ -1644,8 +1644,8 @@ public class TestFsck {
     final long precision = 1L;
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    int totalSize = ErasureCodingSchemaManager.getSystemDefaultSchema().getNumDataUnits()
-        + ErasureCodingSchemaManager.getSystemDefaultSchema().getNumParityUnits();
+    int totalSize = ErasureCodingPolicyManager.getSystemDefaultPolicy().getNumDataUnits()
+        + ErasureCodingPolicyManager.getSystemDefaultPolicy().getNumParityUnits();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();

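
The fsck test sizes its MiniDFSCluster so that every internal block of one striped group lands on its own datanode; the arithmetic is just data plus parity units. A sketch under the same default-policy assumption:

```java
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

class ClusterSizingSketch {
  static int datanodesForOneBlockGroup() {
    ErasureCodingPolicy p = ErasureCodingPolicyManager.getSystemDefaultPolicy();
    // RS-6-3-64k: 6 data + 3 parity = 9 datanodes to host one full group.
    return p.getNumDataUnits() + p.getNumParityUnits();
  }
}
```
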
@@ -26,11 +26,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;

@@ -44,10 +44,10 @@ import java.io.IOException;
 public class TestQuotaWithStripedBlocks {
   private static final int BLOCK_SIZE = 1024 * 1024;
   private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private static final ECSchema ecSchema =
-      ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int NUM_DATA_BLOCKS = ecSchema.getNumDataUnits();
-  private static final int NUM_PARITY_BLOCKS = ecSchema.getNumParityUnits();
+  private static final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private static final int NUM_DATA_BLOCKS = ecPolicy.getNumDataUnits();
+  private static final int NUM_PARITY_BLOCKS = ecPolicy.getNumParityUnits();
   private static final int GROUP_SIZE = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
   private static final Path ecDir = new Path("/ec");

@@ -66,7 +66,7 @@ public class TestQuotaWithStripedBlocks {
     dfs = cluster.getFileSystem();

     dfs.mkdirs(ecDir);
-    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
+    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecPolicy);
     dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
     dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
     dfs.setStoragePolicy(ecDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
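
The setup above also shows the client-facing half of the change: createErasureCodingZone loses its trailing cellSize argument and takes the policy directly, and a null policy still selects the system default, as several other hunks rely on. A sketch, assuming a DistributedFileSystem handle from a running MiniDFSCluster:

```java
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

class EcZoneSketch {
  static void createZones(DistributedFileSystem dfs) throws Exception {
    ErasureCodingPolicy ecPolicy =
        ErasureCodingPolicyManager.getSystemDefaultPolicy();
    // Explicit policy; previously this call carried a third cellSize arg.
    dfs.getClient().createErasureCodingZone("/ec", ecPolicy);
    // Null policy falls back to the system default.
    dfs.getClient().createErasureCodingZone("/eczone", null);
  }
}
```
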
@@ -35,13 +35,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 import org.junit.Test;

@@ -59,9 +59,8 @@ public class TestStripedINodeFile {
   private final BlockStoragePolicy defaultPolicy =
       defaultSuite.getDefaultPolicy();

-  private static final ECSchema testSchema
-      = ErasureCodingSchemaManager.getSystemDefaultSchema();
-  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+  private static final ErasureCodingPolicy testECPolicy
+      = ErasureCodingPolicyManager.getSystemDefaultPolicy();

   private static INodeFile createStripedINodeFile() {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,

@@ -79,7 +78,7 @@ public class TestStripedINodeFile {
   public void testBlockStripedTotalBlockCount() {
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     assertEquals(9, blockInfoStriped.getTotalBlockNum());
   }

@@ -89,7 +88,7 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     inf.addBlock(blockInfoStriped);
     assertEquals(1, inf.getBlocks().length);
   }

@@ -100,7 +99,7 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     blockInfoStriped.setNumBytes(1);
     inf.addBlock(blockInfoStriped);
     // 0. Calculate the total bytes per stripes <Num Bytes per Stripes>

@@ -125,11 +124,11 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk1 = new Block(1);
     BlockInfoStriped blockInfoStriped1
-        = new BlockInfoStriped(blk1, testSchema, cellSize);
+        = new BlockInfoStriped(blk1, testECPolicy);
     blockInfoStriped1.setNumBytes(1);
     Block blk2 = new Block(2);
     BlockInfoStriped blockInfoStriped2
-        = new BlockInfoStriped(blk2, testSchema, cellSize);
+        = new BlockInfoStriped(blk2, testECPolicy);
     blockInfoStriped2.setNumBytes(1);
     inf.addBlock(blockInfoStriped1);
     inf.addBlock(blockInfoStriped2);

@@ -144,7 +143,7 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);
     // Compute file size should return actual data

@@ -159,7 +158,7 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoUnderConstructionStriped bInfoUCStriped
-        = new BlockInfoUnderConstructionStriped(blk, testSchema, cellSize);
+        = new BlockInfoUnderConstructionStriped(blk, testECPolicy);
     bInfoUCStriped.setNumBytes(100);
     inf.addBlock(bInfoUCStriped);
     assertEquals(100, inf.computeFileSize());

@@ -172,7 +171,7 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema, cellSize);
+        = new BlockInfoStriped(blk, testECPolicy);
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);

@@ -193,7 +192,7 @@ public class TestStripedINodeFile {
     INodeFile inf = createStripedINodeFile();
     Block blk = new Block(1);
     BlockInfoUnderConstructionStriped bInfoUCStriped
-        = new BlockInfoUnderConstructionStriped(blk, testSchema, cellSize);
+        = new BlockInfoUnderConstructionStriped(blk, testECPolicy);
     bInfoUCStriped.setNumBytes(100);
     inf.addBlock(bInfoUCStriped);

@@ -235,7 +234,7 @@ public class TestStripedINodeFile {
     dfs.mkdirs(zone);

     // create erasure zone
-    dfs.createErasureCodingZone(zone, null, 0);
+    dfs.createErasureCodingZone(zone, null);
     DFSTestUtil.createFile(dfs, zoneFile, len, (short) 1, 0xFEED);
     DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
     final FSDirectory fsd = fsn.getFSDirectory();
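
The first assertion in this class (assertEquals(9, ...)) is the policy geometry showing through: a striped block's total internal blocks are the sum of the policy's data and parity units. A sketch of that invariant under the default-policy assumption:

```java
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

class TotalBlockNumSketch {
  static boolean holds() {
    ErasureCodingPolicy p = ErasureCodingPolicyManager.getSystemDefaultPolicy();
    BlockInfoStriped blk = new BlockInfoStriped(new Block(1), p);
    // 6 data + 3 parity = 9 for RS-6-3-64k, matching assertEquals(9, ...).
    return blk.getTotalBlockNum() == p.getNumDataUnits() + p.getNumParityUnits();
  }
}
```
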
@@ -60,7 +60,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
-    cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
+    cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
     fs = cluster.getFileSystem();
     Path eczone = new Path("/eczone");
     fs.mkdirs(eczone);
@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;

-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.Before;
 import org.junit.Test;

@@ -84,8 +84,8 @@ public class TestStripedBlockUtil {
   private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
   /** number of full stripes in a full block group */
   private final int BLK_GROUP_STRIPE_NUM = 16;
-  private final ECSchema SCEHMA = ErasureCodingSchemaManager.
-      getSystemDefaultSchema();
+  private final ErasureCodingPolicy ECPOLICY = ErasureCodingPolicyManager.
+      getSystemDefaultPolicy();
   private final Random random = new Random();

   private int[] blockGroupSizes;

@@ -152,7 +152,7 @@ public class TestStripedBlockUtil {
     int done = 0;
     while (done < bgSize) {
       Preconditions.checkState(done % CELLSIZE == 0);
-      StripingCell cell = new StripingCell(SCEHMA, CELLSIZE, done / CELLSIZE, 0);
+      StripingCell cell = new StripingCell(ECPOLICY, CELLSIZE, done / CELLSIZE, 0);
       int idxInStripe = cell.idxInStripe;
       int size = Math.min(CELLSIZE, bgSize - done);
       for (int i = 0; i < size; i++) {

@@ -245,7 +245,7 @@ public class TestStripedBlockUtil {
         if (brStart + brSize > bgSize) {
           continue;
         }
-        AlignedStripe[] stripes = divideByteRangeIntoStripes(SCEHMA,
+        AlignedStripe[] stripes = divideByteRangeIntoStripes(ECPOLICY,
             CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);

         for (AlignedStripe stripe : stripes) {
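
TestStripedBlockUtil walks a block group cell by cell; the only geometry the StripingCell needs from the policy here is the data-unit count, since cells are laid out round-robin across the data blocks. A hedged sketch of that index math (my reading of the layout; StripingCell's internals are not shown in this diff):

```java
class CellLayoutSketch {
  // Round-robin layout: cell k of the block group lives in stripe
  // k / numDataUnits, at position k % numDataUnits within that stripe.
  static int idxInStripe(int cellIdxInBlockGroup, int numDataUnits) {
    return cellIdxInBlockGroup % numDataUnits;
  }

  public static void main(String[] args) {
    // With RS-6-3-64k (6 data units), cell 14 sits at index 2 of stripe 2.
    System.out.println(idxInStripe(14, 6)); // 2
  }
}
```
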
@@ -65,7 +65,7 @@ public class TestJsonUtil {
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null, 0);
+        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);
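
The JSON round-trip test shows the same collapse at the protocol surface: the HdfsFileStatus constructor's trailing pair, passed as "null, 0" for a non-erasure-coded file, becomes a single ErasureCodingPolicy argument. A sketch mirroring the call above, with the null roles as I read them from this diff:

```java
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class FileStatusSketch {
  static HdfsFileStatus nonEcFile(long now) {
    return new HdfsFileStatus(1001L, false, 3, 1L << 26,
        now, now + 10, new FsPermission((short) 0644), "user", "group",
        DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
        HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0,
        // Single trailing argument now: the EC policy (null = not erasure
        // coded), where the old signature ended with a schema plus cellSize.
        null);
  }
}
```
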
@@ -57,11 +57,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^[ \t]*Create a zone to encode files using a specified schema( )*</expected-output>
+          <expected-output>^[ \t]*Create a zone to encode files using a specified policy( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-createZone \[-s <schemaName>\] \[-c <cellSize>\] <path>(.)*</expected-output>
+          <expected-output>^-createZone \[-s <policyName>\] <path>(.)*</expected-output>
         </comparator>
       </comparators>
     </test>
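
The CLI tests also explain why the -c <cellSize> flag could be dropped from -createZone: the cell size is folded into the policy name itself. A hypothetical one-liner showing how a name like RS-6-3-64k decomposes (the naming scheme as inferred from the expected outputs below, not a documented API):

```java
class PolicyNameSketch {
  public static void main(String[] args) {
    // codec - data units - parity units - cell size: "RS-6-3-64k"
    String policyName = String.format("%s-%d-%d-%dk", "RS", 6, 3, 64);
    System.out.println(policyName);
  }
}
```
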
@@ -86,20 +86,20 @@
     </test>

     <test>
-      <description>help: listSchemas command</description>
+      <description>help: listPolicies command</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -help listSchemas</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -help listPolicies</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get the list of ECSchemas supported</expected-output>
+          <expected-output>Get the list of erasure coding policies supported</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-listSchemas (.)*</expected-output>
+          <expected-output>^-listPolicies (.)*</expected-output>
         </comparator>
       </comparators>
     </test>

@@ -109,7 +109,7 @@
       <description>createZone : create a zone to encode files</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /eczone</command>

@@ -141,7 +141,7 @@
     </test>

     <test>
-      <description>createZone : default schema</description>
+      <description>createZone : default policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>

@@ -153,7 +153,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>

@@ -179,7 +179,7 @@
       <description>getZone : get information about the EC zone at specified path</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>

@@ -188,7 +188,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>

@@ -197,7 +197,7 @@
       <description>getZone : get EC zone at specified file path</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
         <command>-fs NAMENODE -touchz /eczone/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
       </test-commands>

@@ -208,15 +208,15 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>listSchemas : get the list of ECSchemas supported</description>
+      <description>listPolicies : get the list of ECPolicies supported</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -listSchemas</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -listPolicies</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>

@@ -247,7 +247,7 @@
     </test>

     <test>
-      <description>createZone : illegal parameters - schema name is missing</description>
+      <description>createZone : illegal parameters - policy name is missing</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone -s</ec-admin-command>

@@ -281,10 +281,10 @@
     </test>

     <test>
-      <description>createZone : illegal parameters - invalidschema</description>
+      <description>createZone : illegal parameters - invalidpolicy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s invalidschema /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -createZone -s invalidpolicy /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /eczone</command>

@@ -292,7 +292,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Schema 'invalidschema' does not match any of the supported schemas. Please select any one of [RS-6-3]</expected-output>
+          <expected-output>Policy 'invalidpolicy' does not match any of the supported policies. Please select any one of [RS-6-3-64k]</expected-output>
         </comparator>
       </comparators>
     </test>

@@ -359,16 +359,16 @@
     </test>

     <test>
-      <description>listSchemas : illegal parameters - too many parameters</description>
+      <description>listPolicies : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -listSchemas /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -listPolicies /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-listSchemas: Too many parameters</expected-output>
+          <expected-output>-listPolicies: Too many parameters</expected-output>
         </comparator>
       </comparators>
     </test>