HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in hadoop-hdfs. Contributed by Walter Su.
parent fbf7e81ca0
commit 1d37a88121
@@ -29,12 +29,6 @@ public final class ECSchema {
   public static final String NUM_PARITY_UNITS_KEY = "numParityUnits";
   public static final String CODEC_NAME_KEY = "codec";
 
-  /**
-   * A friendly and understandable name that can mean what's it, also serves as
-   * the identifier that distinguish it from other schemas.
-   */
-  private final String schemaName;
-
   /**
    * The erasure codec name associated.
    */
@@ -59,14 +53,9 @@ public final class ECSchema {
   /**
    * Constructor with schema name and provided all options. Note the options may
    * contain additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param allOptions all schema options
    */
-  public ECSchema(String schemaName, Map<String, String> allOptions) {
-    assert (schemaName != null && ! schemaName.isEmpty());
-
-    this.schemaName = schemaName;
-
+  public ECSchema(Map<String, String> allOptions) {
     if (allOptions == null || allOptions.isEmpty()) {
       throw new IllegalArgumentException("No schema options are provided");
     }
@@ -94,33 +83,27 @@ public final class ECSchema {
 
   /**
    * Constructor with key parameters provided.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
    * @param numParityUnits number os parity units used in the schema
    */
-  public ECSchema(String schemaName, String codecName,
-      int numDataUnits, int numParityUnits) {
-    this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits) {
+    this(codecName, numDataUnits, numParityUnits, null);
   }
 
   /**
    * Constructor with key parameters provided. Note the extraOptions may contain
    * additional information for the erasure codec to interpret further.
-   * @param schemaName schema name
    * @param codecName codec name
    * @param numDataUnits number of data units used in the schema
    * @param numParityUnits number os parity units used in the schema
    * @param extraOptions extra options to configure the codec
    */
-  public ECSchema(String schemaName, String codecName, int numDataUnits,
-      int numParityUnits, Map<String, String> extraOptions) {
-
-    assert (schemaName != null && ! schemaName.isEmpty());
+  public ECSchema(String codecName, int numDataUnits, int numParityUnits,
+      Map<String, String> extraOptions) {
     assert (codecName != null && ! codecName.isEmpty());
     assert (numDataUnits > 0 && numParityUnits > 0);
 
-    this.schemaName = schemaName;
     this.codecName = codecName;
     this.numDataUnits = numDataUnits;
     this.numParityUnits = numParityUnits;
@@ -153,14 +136,6 @@ public final class ECSchema {
     return result;
   }
 
-  /**
-   * Get the schema name
-   * @return schema name
-   */
-  public String getSchemaName() {
-    return schemaName;
-  }
-
   /**
    * Get the codec name
    * @return codec name
@@ -201,7 +176,6 @@ public final class ECSchema {
   public String toString() {
     StringBuilder sb = new StringBuilder("ECSchema=[");
 
-    sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
     sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
@@ -235,9 +209,6 @@ public final class ECSchema {
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
     }
-    if (!schemaName.equals(ecSchema.schemaName)) {
-      return false;
-    }
    if (!codecName.equals(ecSchema.codecName)) {
       return false;
     }
@@ -246,8 +217,7 @@ public final class ECSchema {
 
   @Override
   public int hashCode() {
-    int result = schemaName.hashCode();
-    result = 31 * result + codecName.hashCode();
+    int result = codecName.hashCode();
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;
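Note: with the name field gone, a schema is now identified only by its codec and unit counts. A minimal usage sketch of the reworked constructors (the "rs"/6/3 values mirror the defaults used elsewhere in this change; the option-map variant assumes the numeric values are passed under the string constants defined above):

    // Direct construction with key parameters.
    ECSchema rs63 = new ECSchema("rs", 6, 3);

    // Construction from an option map.
    Map<String, String> options = new HashMap<String, String>();
    options.put(ECSchema.CODEC_NAME_KEY, "rs");
    options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");
    options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");
    ECSchema fromMap = new ECSchema(options);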
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-/**
- * A EC schema loading utility that loads predefined EC schemas from XML file
- */
-public class SchemaLoader {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      SchemaLoader.class.getName());
-
-  /**
-   * Load predefined ec schemas from configuration file. This file is
-   * expected to be in the XML format.
-   */
-  public List<ECSchema> loadSchema(String schemaFilePath) {
-    File confFile = getSchemaFile(schemaFilePath);
-    if (confFile == null) {
-      LOG.warn("Not found any predefined EC schema file");
-      return Collections.emptyList();
-    }
-
-    try {
-      return loadSchema(confFile);
-    } catch (ParserConfigurationException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (IOException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    } catch (SAXException e) {
-      throw new RuntimeException("Failed to load schema file: " + confFile);
-    }
-  }
-
-  private List<ECSchema> loadSchema(File schemaFile)
-      throws ParserConfigurationException, IOException, SAXException {
-
-    LOG.info("Loading predefined EC schema file {}", schemaFile);
-
-    // Read and parse the schema file.
-    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
-    dbf.setIgnoringComments(true);
-    DocumentBuilder builder = dbf.newDocumentBuilder();
-    Document doc = builder.parse(schemaFile);
-    Element root = doc.getDocumentElement();
-
-    if (!"schemas".equals(root.getTagName())) {
-      throw new RuntimeException("Bad EC schema config file: " +
-          "top-level element not <schemas>");
-    }
-
-    NodeList elements = root.getChildNodes();
-    List<ECSchema> schemas = new ArrayList<ECSchema>();
-    for (int i = 0; i < elements.getLength(); i++) {
-      Node node = elements.item(i);
-      if (node instanceof Element) {
-        Element element = (Element) node;
-        if ("schema".equals(element.getTagName())) {
-          ECSchema schema = loadSchema(element);
-          schemas.add(schema);
-        } else {
-          LOG.warn("Bad element in EC schema configuration file: {}",
-              element.getTagName());
-        }
-      }
-    }
-
-    return schemas;
-  }
-
-  /**
-   * Path to the XML file containing predefined ec schemas. If the path is
-   * relative, it is searched for in the classpath.
-   */
-  private File getSchemaFile(String schemaFilePath) {
-    File schemaFile = new File(schemaFilePath);
-    if (! schemaFile.isAbsolute()) {
-      URL url = Thread.currentThread().getContextClassLoader()
-          .getResource(schemaFilePath);
-      if (url == null) {
-        LOG.warn("{} not found on the classpath.", schemaFilePath);
-        schemaFile = null;
-      } else if (! url.getProtocol().equalsIgnoreCase("file")) {
-        throw new RuntimeException(
-            "EC predefined schema file " + url +
-            " found on the classpath is not on the local filesystem.");
-      } else {
-        schemaFile = new File(url.getPath());
-      }
-    }
-
-    return schemaFile;
-  }
-
-  /**
-   * Loads a schema from a schema element in the configuration file
-   */
-  private ECSchema loadSchema(Element element) {
-    String schemaName = element.getAttribute("name");
-    Map<String, String> ecOptions = new HashMap<String, String>();
-    NodeList fields = element.getChildNodes();
-
-    for (int i = 0; i < fields.getLength(); i++) {
-      Node fieldNode = fields.item(i);
-      if (fieldNode instanceof Element) {
-        Element field = (Element) fieldNode;
-        String tagName = field.getTagName();
-        String value = ((Text) field.getFirstChild()).getData().trim();
-        ecOptions.put(tagName, value);
-      }
-    }
-
-    ECSchema schema = new ECSchema(schemaName, ecOptions);
-    return schema;
-  }
-}
@@ -26,7 +26,6 @@ public class TestECSchema {
 
   @Test
   public void testGoodSchema() {
-    String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
     String codec = "rs";
@@ -39,10 +38,9 @@ public class TestECSchema {
     options.put(ECSchema.CODEC_NAME_KEY, codec);
     options.put(extraOption, extraOptionValue);
 
-    ECSchema schema = new ECSchema(schemaName, options);
+    ECSchema schema = new ECSchema(options);
     System.out.println(schema.toString());
 
-    assertEquals(schemaName, schema.getSchemaName());
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
     assertEquals(codec, schema.getCodecName());
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.PrintWriter;
-import java.util.List;
-
-import org.junit.Test;
-
-public class TestSchemaLoader {
-
-  final static String TEST_DIR = new File(System.getProperty(
-      "test.build.data", "/tmp")).getAbsolutePath();
-
-  final static String SCHEMA_FILE = new File(TEST_DIR, "test-ecschema")
-      .getAbsolutePath();
-
-  @Test
-  public void testLoadSchema() throws Exception {
-    PrintWriter out = new PrintWriter(new FileWriter(SCHEMA_FILE));
-    out.println("<?xml version=\"1.0\"?>");
-    out.println("<schemas>");
-    out.println(" <schema name=\"RSk6m3\">");
-    out.println(" <numDataUnits>6</numDataUnits>");
-    out.println(" <numParityUnits>3</numParityUnits>");
-    out.println(" <codec>RS</codec>");
-    out.println(" </schema>");
-    out.println(" <schema name=\"RSk10m4\">");
-    out.println(" <numDataUnits>10</numDataUnits>");
-    out.println(" <numParityUnits>4</numParityUnits>");
-    out.println(" <codec>RS</codec>");
-    out.println(" </schema>");
-    out.println("</schemas>");
-    out.close();
-
-    SchemaLoader schemaLoader = new SchemaLoader();
-    List<ECSchema> schemas = schemaLoader.loadSchema(SCHEMA_FILE);
-
-    assertEquals(2, schemas.size());
-
-    ECSchema schema1 = schemas.get(0);
-    assertEquals("RSk6m3", schema1.getSchemaName());
-    assertEquals(0, schema1.getExtraOptions().size());
-    assertEquals(6, schema1.getNumDataUnits());
-    assertEquals(3, schema1.getNumParityUnits());
-    assertEquals("RS", schema1.getCodecName());
-
-    ECSchema schema2 = schemas.get(1);
-    assertEquals("RSk10m4", schema2.getSchemaName());
-    assertEquals(0, schema2.getExtraOptions().size());
-    assertEquals(10, schema2.getNumDataUnits());
-    assertEquals(4, schema2.getNumParityUnits());
-    assertEquals("RS", schema2.getCodecName());
-  }
-}
@@ -183,8 +183,8 @@ public interface HdfsClientConfigKeys {
 
     String THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
     /**
-     * With default 6+3 schema, each normal read could span 6 DNs. So this
-     * default value accommodates 3 read streams
+     * With default RS-6-3-64k erasure coding policy, each normal read could span
+     * 6 DNs, so this default value accommodates 3 read streams
      */
     int THREADPOOL_SIZE_DEFAULT = 18;
   }
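(Quick arithmetic behind that default: 6 DataNodes touched per read stream × 3 concurrent read streams = 18 threads, which is THREADPOOL_SIZE_DEFAULT.)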
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+
+/**
+ * A policy about how to write/read/code an erasure coding file.
+ */
+public final class ErasureCodingPolicy {
+
+  private final String name;
+  private final ECSchema schema;
+  private final int cellSize;
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize){
+    this.name = name;
+    this.schema = schema;
+    this.cellSize = cellSize;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public ECSchema getSchema() {
+    return schema;
+  }
+
+  public int getCellSize() {
+    return cellSize;
+  }
+
+  public int getNumDataUnits() {
+    return schema.getNumDataUnits();
+  }
+
+  public int getNumParityUnits() {
+    return schema.getNumParityUnits();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ErasureCodingPolicy that = (ErasureCodingPolicy) o;
+
+    if (that.getName().equals(name) && that.getCellSize() == cellSize
+        && that.getSchema().equals(schema)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = name.hashCode();
+    result = 31 * result + schema.hashCode();
+    result = 31 * result + cellSize;
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ErasureCodingPolicy=[");
+    sb.append("Name=" + name + ", ");
+    sb.append("Schema=[" + schema.toString() + "], ");
+    sb.append("CellSize=" + cellSize + " ");
+    sb.append("]");
+    return sb.toString();
+  }
+}
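For orientation, a minimal sketch of how the new class bundles a schema and a cell size under a single name (the policy name and 64 KB cell size below are illustrative; they match the RS-6-3-64k default mentioned in HdfsClientConfigKeys):

    ECSchema rsSchema = new ECSchema("rs", 6, 3);
    ErasureCodingPolicy policy =
        new ErasureCodingPolicy("RS-6-3-64k", rsSchema, 64 * 1024);
    policy.getNumDataUnits();   // 6, delegated to the schema
    policy.getNumParityUnits(); // 3, delegated to the schema
    policy.getCellSize();       // 65536, carried by the policy itself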
@@ -77,8 +77,8 @@ public final class HdfsConstants {
 
   /*
    * These values correspond to the values used by the system default erasure
-   * coding schema.
-   * TODO: to be removed once all places use schema.
+   * coding policy.
+   * TODO: get these values from ec policy of the associated INodeFile
    */
 
   public static final byte NUM_DATA_BLOCKS = 6;
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /** Interface that represents the over the wire information for a file.
  */
@@ -49,8 +48,7 @@ public class HdfsFileStatus {
 
   private final FileEncryptionInfo feInfo;
 
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
 
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
@@ -77,7 +75,7 @@ public class HdfsFileStatus {
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] symlink,
       byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ECSchema ecSchema, int stripeCellSize) {
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -97,8 +95,7 @@ public class HdfsFileStatus {
     this.childrenNum = childrenNum;
     this.feInfo = feInfo;
     this.storagePolicy = storagePolicy;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -256,12 +253,8 @@ public class HdfsFileStatus {
     return feInfo;
   }
 
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   public final int getChildrenNum() {
@@ -24,7 +24,6 @@ import java.util.Comparator;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Collection of blocks with their locations and the file length.
@@ -38,8 +37,7 @@ public class LocatedBlocks {
   private final LocatedBlock lastLocatedBlock;
   private final boolean isLastBlockComplete;
   private final FileEncryptionInfo fileEncryptionInfo;
-  private final ECSchema ecSchema;
-  private final int stripeCellSize;
+  private final ErasureCodingPolicy ecPolicy;
 
   public LocatedBlocks() {
     fileLength = 0;
@@ -48,22 +46,20 @@ public class LocatedBlocks {
     lastLocatedBlock = null;
     isLastBlockComplete = false;
     fileEncryptionInfo = null;
-    ecSchema = null;
-    stripeCellSize = 0;
+    ecPolicy = null;
   }
 
   public LocatedBlocks(long flength, boolean isUnderConstuction,
       List<LocatedBlock> blks, LocatedBlock lastBlock,
       boolean isLastBlockCompleted, FileEncryptionInfo feInfo,
-      ECSchema ecSchema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     fileLength = flength;
     blocks = blks;
     underConstruction = isUnderConstuction;
     this.lastLocatedBlock = lastBlock;
     this.isLastBlockComplete = isLastBlockCompleted;
     this.fileEncryptionInfo = feInfo;
-    this.ecSchema = ecSchema;
-    this.stripeCellSize = stripeCellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -120,17 +116,10 @@ public class LocatedBlocks {
   }
 
   /**
-   * @return The ECSchema for ErasureCoded file, null otherwise.
+   * @return The ECPolicy for ErasureCoded file, null otherwise.
    */
-  public ECSchema getECSchema() {
-    return ecSchema;
-  }
-
-  /**
-   * @return Stripe Cell size for ErasureCoded file, 0 otherwise.
-   */
-  public int getStripeCellSize() {
-    return stripeCellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   /**
@@ -61,7 +61,7 @@ public class SnapshottableDirectoryStatus {
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+        childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
@@ -132,7 +132,7 @@ class JsonUtilClient {
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtilClient.string2Bytes(localName),
         fileId, childrenNum, null,
-        storagePolicy, null, 0);
+        storagePolicy, null);
   }
 
   /** Convert a Json map to an ExtendedBlock object. */
@@ -479,7 +479,7 @@ class JsonUtilClient {
         (Map<?, ?>) m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete, null, null, 0);
+        lastLocatedBlock, isLastBlockComplete, null, null);
   }
 
 }
@@ -394,3 +394,6 @@
 
     HDFS-8827. Erasure Coding: Fix NPE when NameNode processes over-replicated
     striped blocks. (Walter Su and Takuya Fukudome via jing9)
+
+    HDFS-8854. Erasure coding: add ECPolicy to replace schema+cellSize in
+    hadoop-hdfs. (Walter Su via zhz)
@@ -165,7 +165,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -1194,10 +1194,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try {
       LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
       if (locatedBlocks != null) {
-        ECSchema schema = locatedBlocks.getECSchema();
-        if (schema != null) {
-          return new DFSStripedInputStream(this, src, verifyChecksum, schema,
-              locatedBlocks.getStripeCellSize(), locatedBlocks);
+        ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+        if (ecPolicy != null) {
+          return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
+              locatedBlocks);
         }
         return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
       } else {
@@ -3011,12 +3011,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkOpen();
     TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
     try {
-      namenode.createErasureCodingZone(src, schema, cellSize);
+      namenode.createErasureCodingZone(src, ecPolicy);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           SafeModeException.class,
@@ -3138,11 +3138,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
+    TraceScope scope = Trace.startSpan("getErasureCodingPolicies", traceSampler);
     try {
-      return namenode.getECSchemas();
+      return namenode.getErasureCodingPolicies();
     } finally {
       scope.close();
     }
@@ -680,12 +680,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT =
       false;
 
-  public static final String DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE =
-      "dfs.client.striped.read.threadpool.size";
-  // With default 3+2 schema, each normal read could span 3 DNs. So this
-  // default value accommodates 6 read streams
-  public static final int DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE = 18;
-
   // Slow io warning log threshold settings for dfsclient and datanode.
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.datanode.slow.io.warning.threshold.ms";
@@ -271,7 +271,7 @@ public class DFSOutputStream extends FSOutputSummer
     }
     Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
     final DFSOutputStream out;
-    if(stat.getECSchema() != null) {
+    if(stat.getErasureCodingPolicy() != null) {
       out = new DFSStripedOutputStream(dfsClient, src, stat,
           flag, progress, checksum, favoredNodes);
     } else {
@@ -36,7 +36,7 @@ import static org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResu
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.util.DirectBufferPool;
@@ -147,7 +147,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   /** the buffer for a complete stripe */
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
-  private final ECSchema schema;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureDecoder decoder;
 
   /**
@@ -158,15 +158,15 @@ public class DFSStripedInputStream extends DFSInputStream {
   private final CompletionService<Void> readingService;
 
   DFSStripedInputStream(DFSClient dfsClient, String src,
-      boolean verifyChecksum, ECSchema schema, int cellSize,
+      boolean verifyChecksum, ErasureCodingPolicy ecPolicy,
       LocatedBlocks locatedBlocks) throws IOException {
     super(dfsClient, src, verifyChecksum, locatedBlocks);
 
-    assert schema != null;
-    this.schema = schema;
-    this.cellSize = cellSize;
-    dataBlkNum = (short) schema.getNumDataUnits();
-    parityBlkNum = (short) schema.getNumParityUnits();
+    assert ecPolicy != null;
+    this.ecPolicy = ecPolicy;
+    this.cellSize = ecPolicy.getCellSize();
+    dataBlkNum = (short) ecPolicy.getNumDataUnits();
+    parityBlkNum = (short) ecPolicy.getNumParityUnits();
     groupSize = dataBlkNum + parityBlkNum;
     blockReaders = new BlockReaderInfo[groupSize];
     curStripeRange = new StripeRange(0, 0);
@@ -282,7 +282,7 @@ public class DFSStripedInputStream extends DFSInputStream {
         stripeLimit - stripeBufOffset);
 
     LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
-    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(schema, cellSize,
+    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
         blockGroup, offsetInBlockGroup,
         offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@@ -510,7 +510,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
 
     AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
-        schema, cellSize, blockGroup, start, end, buf, offset);
+        ecPolicy, cellSize, blockGroup, start, end, buf, offset);
     CompletionService<Void> readService = new ExecutorCompletionService<>(
         dfsClient.getStripedReadsThreadPool());
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
@@ -276,10 +276,10 @@ public class DFSStripedOutputStream extends DFSOutputStream {
       LOG.debug("Creating DFSStripedOutputStream for " + src);
     }
 
-    final ECSchema schema = stat.getECSchema();
-    final int numParityBlocks = schema.getNumParityUnits();
-    cellSize = stat.getStripeCellSize();
-    numDataBlocks = schema.getNumDataUnits();
+    final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+    final int numParityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    numDataBlocks = ecPolicy.getNumDataUnits();
     numAllBlocks = numDataBlocks + numParityBlocks;
 
     encoder = CodecUtil.createRSRawEncoder(dfsClient.getConfiguration(),
@@ -90,7 +90,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2280,18 +2280,17 @@ public class DistributedFileSystem extends FileSystem {
    * Create the erasurecoding zone
    *
    * @param path Directory to create the ec zone
-   * @param schema ECSchema for the zone. If not specified default will be used.
-   * @param cellSize Cellsize for the striped erasure coding
+   * @param ecPolicy erasure coding policy for the zone. If not specified default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
+  public void createErasureCodingZone(final Path path, final ErasureCodingPolicy ecPolicy)
+      throws IOException {
     Path absF = fixRelativePart(path);
     new FileSystemLinkResolver<Void>() {
       @Override
       public Void doCall(final Path p) throws IOException,
           UnresolvedLinkException {
-        dfs.createErasureCodingZone(getPathName(p), schema, cellSize);
+        dfs.createErasureCodingZone(getPathName(p), ecPolicy);
         return null;
       }
 
@@ -2299,7 +2298,7 @@ public class DistributedFileSystem extends FileSystem {
       public Void next(final FileSystem fs, final Path p) throws IOException {
         if (fs instanceof DistributedFileSystem) {
           DistributedFileSystem myDfs = (DistributedFileSystem) fs;
-          myDfs.createErasureCodingZone(p, schema, cellSize);
+          myDfs.createErasureCodingZone(p, ecPolicy);
           return null;
         }
         throw new UnsupportedOperationException(
@@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
@@ -369,17 +369,13 @@ public class HdfsAdmin {
   /**
    * Create the ErasureCoding zone
    *
-   * @param path
-   *          Directory to create the ErasureCoding zone
-   * @param schema
-   *          ECSchema for the zone. If not specified default will be used.
-   * @param cellSize
-   *          Cellsize for the striped ErasureCoding
+   * @param path Directory to create the ErasureCoding zone
+   * @param ecPolicy erasure coding policy for the zone. If null, the default will be used.
    * @throws IOException
    */
-  public void createErasureCodingZone(final Path path, final ECSchema schema,
-      final int cellSize) throws IOException {
-    dfs.createErasureCodingZone(path, schema, cellSize);
+  public void createErasureCodingZone(final Path path,
+      final ErasureCodingPolicy ecPolicy) throws IOException {
+    dfs.createErasureCodingZone(path, ecPolicy);
   }
 
   /**
@@ -395,12 +391,11 @@ public class HdfsAdmin {
   }
 
   /**
-   * Get the ErasureCoding schemas supported.
+   * Get the ErasureCoding policies supported.
    *
-   * @return ECSchemas
    * @throws IOException
    */
-  public ECSchema[] getECSchemas() throws IOException {
-    return dfs.getClient().getECSchemas();
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+    return dfs.getClient().getErasureCodingPolicies();
   }
 }
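Taken together with the DistributedFileSystem change above, callers now hand the client a single policy object instead of a schema plus a cell size. A rough sketch of the new admin-side calls (the path and the choice of policies[0] are illustrative; passing null selects the default policy, per the javadoc):

    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    ErasureCodingPolicy[] policies = admin.getErasureCodingPolicies();
    admin.createErasureCodingZone(new Path("/ec-zone"), policies[0]);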
@@ -54,7 +54,6 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.AccessControlException;
@@ -1459,21 +1458,20 @@ public interface ClientProtocol {
   public EventBatchList getEditsFromTxid(long txid) throws IOException;
 
   /**
-   * Create an erasure coding zone with specified schema, if any, otherwise
+   * Create an erasure coding zone with specified policy, if any, otherwise
    * default
    */
   @AtMostOnce
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException;
 
   /**
-   * Gets list of ECSchemas loaded in Namenode
+   * Get the erasure coding policies loaded in Namenode
    *
-   * @return Returns the list of ECSchemas loaded at Namenode
    * @throws IOException
    */
   @Idempotent
-  public ECSchema[] getECSchemas() throws IOException;
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException;
 
   /**
    * Get the information about the EC zone for the path
@@ -16,21 +16,17 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
 /**
  * Information about the EC Zone at the specified path.
  */
 public class ErasureCodingZone {
 
   private String dir;
-  private ECSchema schema;
-  private int cellSize;
+  private ErasureCodingPolicy ecPolicy;
 
-  public ErasureCodingZone(String dir, ECSchema schema, int cellSize) {
+  public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) {
     this.dir = dir;
-    this.schema = schema;
-    this.cellSize = cellSize;
+    this.ecPolicy = ecPolicy;
   }
 
   /**
@@ -43,24 +39,16 @@ public class ErasureCodingZone {
   }
 
   /**
-   * Get the schema for the EC Zone
+   * Get the erasure coding policy for the EC Zone
    *
    * @return
    */
-  public ECSchema getSchema() {
-    return schema;
-  }
-
-  /**
-   * Get cellSize for the EC Zone
-   */
-  public int getCellSize() {
-    return cellSize;
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
   }
 
   @Override
   public String toString() {
-    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
-        + cellSize;
+    return "Dir: " + getDir() + ", Policy: " + ecPolicy;
   }
 }
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 
 /**
  * Interface that represents the over the wire information
@@ -60,10 +59,10 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
       long access_time, FsPermission permission, String owner, String group,
       byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
       int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
-      ECSchema schema, int stripeCellSize) {
+      ErasureCodingPolicy ecPolicy) {
     super(length, isdir, block_replication, blocksize, modification_time,
         access_time, permission, owner, group, symlink, path, fileId,
-        childrenNum, feInfo, storagePolicy, schema, stripeCellSize);
+        childrenNum, feInfo, storagePolicy, ecPolicy);
     this.locations = locations;
   }
 
@@ -199,8 +199,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -220,7 +220,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
 import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
@@ -1403,10 +1403,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 RpcController controller, CreateErasureCodingZoneRequestProto req)
 throws ServiceException {
 try {
-ECSchema schema = req.hasSchema() ? PBHelper.convertECSchema(req
-.getSchema()) : null;
-int cellSize = req.hasCellSize() ? req.getCellSize() : 0;
-server.createErasureCodingZone(req.getSrc(), schema, cellSize);
+ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req
+.getEcPolicy()) : null;
+server.createErasureCodingZone(req.getSrc(), ecPolicy);
 return CreateErasureCodingZoneResponseProto.newBuilder().build();
 } catch (IOException e) {
 throw new ServiceException(e);
@@ -1522,14 +1521,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 }

 @Override
-public GetECSchemasResponseProto getECSchemas(RpcController controller,
-GetECSchemasRequestProto request) throws ServiceException {
+public GetErasureCodingPoliciesResponseProto getErasureCodingPolicies(RpcController controller,
+GetErasureCodingPoliciesRequestProto request) throws ServiceException {
 try {
-ECSchema[] ecSchemas = server.getECSchemas();
-GetECSchemasResponseProto.Builder resBuilder = GetECSchemasResponseProto
+ErasureCodingPolicy[] ecPolicies = server.getErasureCodingPolicies();
+GetErasureCodingPoliciesResponseProto.Builder resBuilder = GetErasureCodingPoliciesResponseProto
 .newBuilder();
-for (ECSchema ecSchema : ecSchemas) {
-resBuilder.addSchemas(PBHelper.convertECSchema(ecSchema));
+for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+resBuilder.addEcPolicies(PBHelper.convertErasureCodingPolicy(ecPolicy));
 }
 return resBuilder.build();
 } catch (IOException e) {
@@ -165,12 +165,12 @@ import org.apache.hadoop.hdfs.protocol.proto.*;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -182,7 +182,7 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolTranslator;
@@ -240,8 +240,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
 VOID_GET_STORAGE_POLICIES_REQUEST =
 GetStoragePoliciesRequestProto.newBuilder().build();

-private final static GetECSchemasRequestProto
-VOID_GET_ECSCHEMAS_REQUEST = GetECSchemasRequestProto
+private final static GetErasureCodingPoliciesRequestProto
+VOID_GET_EC_POLICIES_REQUEST = GetErasureCodingPoliciesRequestProto
 .newBuilder().build();

 public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
@@ -1419,16 +1419,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
 }

 @Override
-public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
 throws IOException {
 final CreateErasureCodingZoneRequestProto.Builder builder =
 CreateErasureCodingZoneRequestProto.newBuilder();
 builder.setSrc(src);
-if (schema != null) {
-builder.setSchema(PBHelper.convertECSchema(schema));
-}
-if (cellSize > 0) {
-builder.setCellSize(cellSize);
+if (ecPolicy != null) {
+builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
 }
 CreateErasureCodingZoneRequestProto req = builder.build();
 try {
@@ -1550,16 +1547,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
 }

 @Override
-public ECSchema[] getECSchemas() throws IOException {
+public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
 try {
-GetECSchemasResponseProto response = rpcProxy.getECSchemas(null,
-VOID_GET_ECSCHEMAS_REQUEST);
-ECSchema[] schemas = new ECSchema[response.getSchemasCount()];
+GetErasureCodingPoliciesResponseProto response = rpcProxy
+.getErasureCodingPolicies(null, VOID_GET_EC_POLICIES_REQUEST);
+ErasureCodingPolicy[] ecPolicies =
+new ErasureCodingPolicy[response.getEcPoliciesCount()];
 int i = 0;
-for (ECSchemaProto schemaProto : response.getSchemasList()) {
-schemas[i++] = PBHelper.convertECSchema(schemaProto);
+for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
+ecPolicies[i++] = PBHelper.convertErasureCodingPolicy(ecPolicyProto);
 }
-return schemas;
+return ecPolicies;
 } catch (ServiceException e) {
 throw ProtobufHelper.getRemoteException(e);
 }
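
For orientation only, not part of the patch: a minimal caller-side sketch of the renamed RPC surface after this change. The ClientProtocol handle and the wrapper class are assumptions for illustration; the accessor names (getErasureCodingPolicies, getName, getSchema, getCellSize) are the ones introduced or used in this commit.

    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Hypothetical helper: prints the policies a NameNode advertises through
    // the renamed getErasureCodingPolicies() RPC.
    class ListEcPolicies {
      static void print(ClientProtocol namenode) throws java.io.IOException {
        for (ErasureCodingPolicy p : namenode.getErasureCodingPolicies()) {
          // Each policy bundles a name, an ECSchema, and a striping cell size.
          System.out.println(p.getName() + ": " + p.getSchema().getNumDataUnits()
              + "+" + p.getSchema().getNumParityUnits()
              + " units, cellSize=" + p.getCellSize());
        }
      }
    }
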
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -137,6 +138,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecovery
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
@@ -1348,8 +1350,7 @@ public class PBHelper {
 PBHelper.convertLocatedBlockProto(lb.getLastBlock()) : null,
 lb.getIsLastBlockComplete(),
 lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : null,
-lb.hasECSchema() ? convertECSchema(lb.getECSchema()) : null,
-lb.hasStripeCellSize() ? lb.getStripeCellSize() : 0);
+lb.hasEcPolicy() ? convertErasureCodingPolicy(lb.getEcPolicy()) : null);
 }

 public static LocatedBlocksProto convert(LocatedBlocks lb) {
@@ -1365,11 +1366,8 @@ public class PBHelper {
 if (lb.getFileEncryptionInfo() != null) {
 builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
 }
-if (lb.getECSchema() != null) {
-builder.setECSchema(convertECSchema(lb.getECSchema()));
-}
-if (lb.getStripeCellSize() != 0) {
-builder.setStripeCellSize(lb.getStripeCellSize());
+if (lb.getErasureCodingPolicy() != null) {
+builder.setEcPolicy(convertErasureCodingPolicy(lb.getErasureCodingPolicy()));
 }
 return builder.setFileLength(lb.getFileLength())
 .setUnderConstruction(lb.isUnderConstruction())
@@ -1514,8 +1512,7 @@ public class PBHelper {
 fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
 fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
 : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-fs.hasEcSchema() ? PBHelper.convertECSchema(fs.getEcSchema()) : null,
-fs.hasStripeCellSize() ? fs.getStripeCellSize() : 0);
+fs.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(fs.getEcPolicy()) : null);
 }

 public static SnapshottableDirectoryStatus convert(
@@ -1576,10 +1573,9 @@ public class PBHelper {
 builder.setLocations(PBHelper.convert(locations));
 }
 }
-if(fs.getECSchema() != null) {
-builder.setEcSchema(PBHelper.convertECSchema(fs.getECSchema()));
+if(fs.getErasureCodingPolicy() != null) {
+builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(fs.getErasureCodingPolicy()));
 }
-builder.setStripeCellSize(fs.getStripeCellSize());
 return builder.build();
 }

@@ -3137,13 +3133,12 @@ public class PBHelper {
 for (ECSchemaOptionEntryProto option : optionsList) {
 options.put(option.getKey(), option.getValue());
 }
-return new ECSchema(schema.getSchemaName(), schema.getCodecName(),
-schema.getDataUnits(), schema.getParityUnits(), options);
+return new ECSchema(schema.getCodecName(), schema.getDataUnits(),
+schema.getParityUnits(), options);
 }

 public static ECSchemaProto convertECSchema(ECSchema schema) {
 ECSchemaProto.Builder builder = ECSchemaProto.newBuilder()
-.setSchemaName(schema.getSchemaName())
 .setCodecName(schema.getCodecName())
 .setDataUnits(schema.getNumDataUnits())
 .setParityUnits(schema.getNumParityUnits());
@@ -3155,17 +3150,34 @@ public class PBHelper {
 return builder.build();
 }

+public static ErasureCodingPolicy convertErasureCodingPolicy(
+ErasureCodingPolicyProto policy) {
+return new ErasureCodingPolicy(policy.getName(),
+convertECSchema(policy.getSchema()),
+policy.getCellSize());
+}
+
+public static ErasureCodingPolicyProto convertErasureCodingPolicy(
+ErasureCodingPolicy policy) {
+ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
+.newBuilder()
+.setName(policy.getName())
+.setSchema(convertECSchema(policy.getSchema()))
+.setCellSize(policy.getCellSize());
+return builder.build();
+}
+
 public static ErasureCodingZoneProto convertErasureCodingZone(
 ErasureCodingZone ecZone) {
 return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
-.setSchema(convertECSchema(ecZone.getSchema()))
-.setCellSize(ecZone.getCellSize()).build();
+.setEcPolicy(convertErasureCodingPolicy(ecZone.getErasureCodingPolicy()))
+.build();
 }

 public static ErasureCodingZone convertErasureCodingZone(
 ErasureCodingZoneProto ecZoneProto) {
 return new ErasureCodingZone(ecZoneProto.getDir(),
-convertECSchema(ecZoneProto.getSchema()), ecZoneProto.getCellSize());
+convertErasureCodingPolicy(ecZoneProto.getEcPolicy()));
 }

 public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
@@ -3198,12 +3210,11 @@ public class PBHelper {
 liveBlkIndices[i] = liveBlockIndicesList.get(i).shortValue();
 }

-ECSchema ecSchema = convertECSchema(blockEcRecoveryInfoProto.getEcSchema());
-int cellSize = blockEcRecoveryInfoProto.getCellSize();
+ErasureCodingPolicy ecPolicy =
+convertErasureCodingPolicy(blockEcRecoveryInfoProto.getEcPolicy());

 return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos,
-targetStorageUuids, convertStorageTypes, liveBlkIndices, ecSchema,
-cellSize);
+targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
 }

 public static BlockECRecoveryInfoProto convertBlockECRecoveryInfo(
@@ -3228,8 +3239,8 @@ public class PBHelper {
 short[] liveBlockIndices = blockEcRecoveryInfo.getLiveBlockIndices();
 builder.addAllLiveBlockIndices(convertIntArray(liveBlockIndices));

-builder.setEcSchema(convertECSchema(blockEcRecoveryInfo.getECSchema()));
-builder.setCellSize(blockEcRecoveryInfo.getCellSize());
+builder.setEcPolicy(convertErasureCodingPolicy(blockEcRecoveryInfo
+.getErasureCodingPolicy()));

 return builder.build();
 }
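
For context, a minimal round-trip sketch (not code from this patch): the new ErasureCodingPolicy object carries a name, an ECSchema and a cell size, and PBHelper now converts it to and from ErasureCodingPolicyProto in one call instead of shipping a schema plus a separate stripeCellSize field. The literal values below ("rs", 6, 3, "RS-6-3-64k", 64k) mirror the system defaults defined later in this commit.

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    // Sketch using the constructors and converters that appear in this change.
    class EcPolicyRoundTrip {
      static void demo() {
        ECSchema schema = new ECSchema("rs", 6, 3);                    // codec, data units, parity units
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy("RS-6-3-64k", schema, 64 * 1024);  // name, schema, cell size
        ErasureCodingPolicyProto proto = PBHelper.convertErasureCodingPolicy(policy);
        ErasureCodingPolicy back = PBHelper.convertErasureCodingPolicy(proto);
        assert back.getName().equals(policy.getName());
        assert back.getCellSize() == policy.getCellSize();
      }
    }
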
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

@@ -38,8 +38,7 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_S
 * array to record the block index for each triplet.
 */
 public class BlockInfoStriped extends BlockInfo {
-private final ECSchema schema;
-private final int cellSize;
+private final ErasureCodingPolicy ecPolicy;
 /**
 * Always the same size with triplets. Record the block index for each triplet
 * TODO: actually this is only necessary for over-replicated block. Thus can
@@ -47,36 +46,34 @@ public class BlockInfoStriped extends BlockInfo {
 */
 private byte[] indices;

-public BlockInfoStriped(Block blk, ECSchema schema, int cellSize) {
-super(blk, (short) (schema.getNumDataUnits() + schema.getNumParityUnits()));
-indices = new byte[schema.getNumDataUnits() + schema.getNumParityUnits()];
+public BlockInfoStriped(Block blk, ErasureCodingPolicy ecPolicy) {
+super(blk, (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()));
+indices = new byte[ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()];
 initIndices();
-this.schema = schema;
-this.cellSize = cellSize;
+this.ecPolicy = ecPolicy;
 }

 BlockInfoStriped(BlockInfoStriped b) {
-this(b, b.getSchema(), b.getCellSize());
+this(b, b.getErasureCodingPolicy());
 this.setBlockCollection(b.getBlockCollection());
 }

 public short getTotalBlockNum() {
-return (short) (this.schema.getNumDataUnits()
-+ this.schema.getNumParityUnits());
+return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
 }

 public short getDataBlockNum() {
-return (short) this.schema.getNumDataUnits();
+return (short) ecPolicy.getNumDataUnits();
 }

 public short getParityBlockNum() {
-return (short) this.schema.getNumParityUnits();
+return (short) ecPolicy.getNumParityUnits();
 }

 /**
 * If the block is committed/completed and its length is less than a full
 * stripe, it returns the the number of actual data blocks.
-* Otherwise it returns the number of data units specified by schema.
+* Otherwise it returns the number of data units specified by erasure coding policy.
 */
 public short getRealDataBlockNum() {
 if (isComplete() || getBlockUCState() == BlockUCState.COMMITTED) {
@@ -91,12 +88,8 @@ public class BlockInfoStriped extends BlockInfo {
 return (short) (getRealDataBlockNum() + getParityBlockNum());
 }

-public ECSchema getSchema() {
-return schema;
-}
-
-public int getCellSize() {
-return cellSize;
+public ErasureCodingPolicy getErasureCodingPolicy() {
+return ecPolicy;
 }

 private void initIndices() {
@@ -230,7 +223,7 @@ public class BlockInfoStriped extends BlockInfo {
 // be the total of data blocks and parity blocks because
 // `getNumBytes` is the total of actual data block size.
 return StripedBlockUtil.spaceConsumedByStripedBlock(getNumBytes(),
-this.schema.getNumDataUnits(), this.schema.getNumParityUnits(),
+ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits(),
 BLOCK_STRIPED_CELL_SIZE);
 }

@@ -260,7 +253,7 @@ public class BlockInfoStriped extends BlockInfo {
 BlockUCState s, DatanodeStorageInfo[] targets) {
 final BlockInfoUnderConstructionStriped ucBlock;
 if(isComplete()) {
-ucBlock = new BlockInfoUnderConstructionStriped(this, schema, cellSize,
+ucBlock = new BlockInfoUnderConstructionStriped(this, ecPolicy,
 s, targets);
 ucBlock.setBlockCollection(getBlockCollection());
 } else {
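
As an illustrative aside (assumptions: a policy instance such as ErasureCodingPolicyManager.getSystemDefaultPolicy(), and a trivially constructed Block; not code from the patch), the striped block's geometry is now read entirely from the policy rather than from a schema plus a separately stored cellSize field:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Sketch: block counts come from the ErasureCodingPolicy passed at construction.
    class StripedBlockSketch {
      static void demo(ErasureCodingPolicy ecPolicy) {
        BlockInfoStriped blk = new BlockInfoStriped(new Block(), ecPolicy);
        short total = blk.getTotalBlockNum();   // data + parity units, both read from ecPolicy
        short data = blk.getDataBlockNum();
        short parity = blk.getParityBlockNum();
        assert total == data + parity;
      }
    }
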
@@ -21,7 +21,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import java.io.IOException;

@@ -57,17 +57,16 @@ public class BlockInfoUnderConstructionStriped extends BlockInfoStriped
 /**
 * Constructor with null storage targets.
 */
-public BlockInfoUnderConstructionStriped(Block blk, ECSchema schema,
-int cellSize) {
-this(blk, schema, cellSize, UNDER_CONSTRUCTION, null);
+public BlockInfoUnderConstructionStriped(Block blk, ErasureCodingPolicy ecPolicy) {
+this(blk, ecPolicy, UNDER_CONSTRUCTION, null);
 }

 /**
 * Create a striped block that is currently being constructed.
 */
-public BlockInfoUnderConstructionStriped(Block blk, ECSchema schema,
-int cellSize, BlockUCState state, DatanodeStorageInfo[] targets) {
-super(blk, schema, cellSize);
+public BlockInfoUnderConstructionStriped(Block blk, ErasureCodingPolicy ecPolicy,
+BlockUCState state, DatanodeStorageInfo[] targets) {
+super(blk, ecPolicy);
 assert getBlockUCState() != COMPLETE :
 "BlockInfoUnderConstructionStriped cannot be in COMPLETE state";
 this.blockUCState = state;
@@ -85,7 +85,7 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -948,14 +948,13 @@ public class BlockManager {
 ErasureCodingZone ecZone)
 throws IOException {
 assert namesystem.hasReadLock();
-final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+final ErasureCodingPolicy ecPolicy = ecZone != null ? ecZone
+.getErasureCodingPolicy() : null;
 if (blocks == null) {
 return null;
 } else if (blocks.length == 0) {
 return new LocatedBlocks(0, isFileUnderConstruction,
-Collections.<LocatedBlock> emptyList(), null, false, feInfo, schema,
-cellSize);
+Collections.<LocatedBlock> emptyList(), null, false, feInfo, ecPolicy);
 } else {
 if (LOG.isDebugEnabled()) {
 LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
@@ -980,7 +979,7 @@ public class BlockManager {
 }
 return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
 isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
-schema, cellSize);
+ecPolicy);
 }
 }

@@ -1597,7 +1596,7 @@ public class BlockManager {
 .warn("Failed to get the EC zone for the file {} ", src);
 }
 if (ecZone == null) {
-blockLog.warn("No EC schema found for the file {}. "
+blockLog.warn("No erasure coding policy found for the file {}. "
 + "So cannot proceed for recovery", src);
 // TODO: we may have to revisit later for what we can do better to
 // handle this case.
@@ -1607,7 +1606,7 @@ public class BlockManager {
 new ExtendedBlock(namesystem.getBlockPoolId(), block),
 rw.srcNodes, rw.targets,
 ((ErasureCodingWork) rw).liveBlockIndicies,
-ecZone.getSchema(), ecZone.getCellSize());
+ecZone.getErasureCodingPolicy());
 } else {
 rw.srcNodes[0].addBlockToBeReplicated(block, targets);
 }
@@ -50,7 +50,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.util.IntrusiveCollection;
 import org.apache.hadoop.util.Time;

@@ -610,10 +610,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
 */
 void addBlockToBeErasureCoded(ExtendedBlock block,
 DatanodeDescriptor[] sources, DatanodeStorageInfo[] targets,
-short[] liveBlockIndices, ECSchema ecSchema, int cellSize) {
+short[] liveBlockIndices, ErasureCodingPolicy ecPolicy) {
 assert (block != null && sources != null && sources.length > 0);
 BlockECRecoveryInfo task = new BlockECRecoveryInfo(block, sources, targets,
-liveBlockIndices, ecSchema, cellSize);
+liveBlockIndices, ecPolicy);
 erasurecodeBlocks.offer(task);
 BlockManager.LOG.debug("Adding block recovery task " + task + "to "
 + getName() + ", current queue size is " + erasurecodeBlocks.size());
@@ -54,7 +54,7 @@ public class StorageLocation {
 // drop any (illegal) authority in the URI for backwards compatibility
 this.file = new File(uri.getPath());
 } else {
-throw new IllegalArgumentException("Unsupported URI schema in " + uri);
+throw new IllegalArgumentException("Unsupported URI ecPolicy in " + uri);
 }
 }

@@ -69,8 +69,7 @@ import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResult;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
@@ -267,10 +266,10 @@ public final class ErasureCodingWorker {
 new ExecutorCompletionService<>(STRIPED_READ_THREAD_POOL);

 ReconstructAndTransferBlock(BlockECRecoveryInfo recoveryInfo) {
-ECSchema schema = recoveryInfo.getECSchema();
-dataBlkNum = schema.getNumDataUnits();
-parityBlkNum = schema.getNumParityUnits();
-cellSize = recoveryInfo.getCellSize();
+ErasureCodingPolicy ecPolicy = recoveryInfo.getErasureCodingPolicy();
+dataBlkNum = ecPolicy.getNumDataUnits();
+parityBlkNum = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();

 blockGroup = recoveryInfo.getExtendedBlock();
 final int cellsNum = (int)((blockGroup.getNumBytes() - 1) / cellSize + 1);
@@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -177,7 +177,7 @@ public class Mover {
 }

 DBlock newDBlock(LocatedBlock lb, List<MLocation> locations,
-ECSchema ecSchema) {
+ErasureCodingPolicy ecPolicy) {
 Block blk = lb.getBlock().getLocalBlock();
 DBlock db;
 if (lb.isStriped()) {
@@ -186,7 +186,7 @@ public class Mover {
 for (int i = 0; i < indices.length; i++) {
 indices[i] = (byte) lsb.getBlockIndices()[i];
 }
-db = new DBlockStriped(blk, indices, (short) ecSchema.getNumDataUnits());
+db = new DBlockStriped(blk, indices, (short) ecPolicy.getNumDataUnits());
 } else {
 db = new DBlock(blk);
 }
@@ -373,7 +373,7 @@ public class Mover {
 List<StorageType> types = policy.chooseStorageTypes(
 status.getReplication());

-final ECSchema ecSchema = status.getECSchema();
+final ErasureCodingPolicy ecPolicy = status.getErasureCodingPolicy();
 final LocatedBlocks locatedBlocks = status.getBlockLocations();
 boolean hasRemaining = false;
 final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
@@ -390,7 +390,7 @@ public class Mover {
 final StorageTypeDiff diff = new StorageTypeDiff(types,
 lb.getStorageTypes());
 if (!diff.removeOverlap(true)) {
-if (scheduleMoves4Block(diff, lb, ecSchema)) {
+if (scheduleMoves4Block(diff, lb, ecPolicy)) {
 hasRemaining |= (diff.existing.size() > 1 &&
 diff.expected.size() > 1);
 }
@@ -400,12 +400,12 @@ public class Mover {
 }

 boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb,
-ECSchema ecSchema) {
+ErasureCodingPolicy ecPolicy) {
 final List<MLocation> locations = MLocation.toLocations(lb);
 if (!(lb instanceof LocatedStripedBlock)) {
 Collections.shuffle(locations);
 }
-final DBlock db = newDBlock(lb, locations, ecSchema);
+final DBlock db = newDBlock(lb, locations, ecPolicy);

 for (final StorageType t : diff.existing) {
 for (final MLocation ml : locations) {
@@ -0,0 +1,115 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+* This manages erasure coding policies predefined and activated in the system.
+* It loads customized policies and syncs with persisted ones in
+* NameNode image.
+*
+* This class is instantiated by the FSNamesystem.
+*/
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+public final class ErasureCodingPolicyManager {
+
+/**
+* TODO: HDFS-8095
+*/
+private static final int DEFAULT_DATA_BLOCKS = 6;
+private static final int DEFAULT_PARITY_BLOCKS = 3;
+private static final int DEFAULT_CELLSIZE = 64 * 1024;
+private static final String DEFAULT_CODEC_NAME = "rs";
+private static final String DEFAULT_POLICY_NAME = "RS-6-3-64k";
+private static final ECSchema SYS_DEFAULT_SCHEMA = new ECSchema(
+DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
+private static final ErasureCodingPolicy SYS_DEFAULT_POLICY =
+new ErasureCodingPolicy(DEFAULT_POLICY_NAME, SYS_DEFAULT_SCHEMA,
+DEFAULT_CELLSIZE);
+
+//We may add more later.
+private static ErasureCodingPolicy[] SYS_POLICY = new ErasureCodingPolicy[] {
+SYS_DEFAULT_POLICY
+};
+
+/**
+* All active policies maintained in NN memory for fast querying,
+* identified and sorted by its name.
+*/
+private final Map<String, ErasureCodingPolicy> activePolicies;
+
+ErasureCodingPolicyManager() {
+
+this.activePolicies = new TreeMap<>();
+for (ErasureCodingPolicy policy : SYS_POLICY) {
+activePolicies.put(policy.getName(), policy);
+}
+
+/**
+* TODO: HDFS-7859 persist into NameNode
+* load persistent policies from image and editlog, which is done only once
+* during NameNode startup. This can be done here or in a separate method.
+*/
+}
+
+/**
+* Get system defined policies.
+* @return system policies
+*/
+public static ErasureCodingPolicy[] getSystemPolices() {
+return SYS_POLICY;
+}
+
+/**
+* Get system-wide default policy, which can be used by default
+* when no policy is specified for an EC zone.
+* @return ecPolicy
+*/
+public static ErasureCodingPolicy getSystemDefaultPolicy() {
+return SYS_DEFAULT_POLICY;
+}
+
+/**
+* Get all policies that's available to use.
+* @return all policies
+*/
+public ErasureCodingPolicy[] getPolicies() {
+ErasureCodingPolicy[] results = new ErasureCodingPolicy[activePolicies.size()];
+return activePolicies.values().toArray(results);
+}
+
+/**
+* Get the policy specified by the policy name.
+*/
+public ErasureCodingPolicy getPolicy(String name) {
+return activePolicies.get(name);
+}
+
+/**
+* Clear and clean up
+*/
+public void clear() {
+activePolicies.clear();
+}
+}
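
A rough usage sketch of the new manager (not part of the patch; it assumes code living in the org.apache.hadoop.hdfs.server.namenode package, since the constructor is package-private): policies are looked up by name, with the single built-in RS-6-3-64k policy acting as the system default.

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Sketch only: how namenode-side code might query the new manager.
    class PolicyManagerSketch {
      static void demo() {
        ErasureCodingPolicyManager manager = new ErasureCodingPolicyManager();
        ErasureCodingPolicy byName = manager.getPolicy("RS-6-3-64k");  // the only system policy so far
        ErasureCodingPolicy fallback = ErasureCodingPolicyManager.getSystemDefaultPolicy();
        for (ErasureCodingPolicy p : manager.getPolicies()) {
          System.out.println(p.getName() + " cellSize=" + p.getCellSize());
        }
        assert byName == fallback;  // both resolve to SYS_DEFAULT_POLICY in this version
      }
    }
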
@@ -1,127 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
-* This manages EC schemas predefined and activated in the system.
-* It loads customized schemas and syncs with persisted ones in
-* NameNode image.
-*
-* This class is instantiated by the FSNamesystem.
-*/
-@InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class ErasureCodingSchemaManager {
-
-/**
-* TODO: HDFS-8095
-*/
-private static final int DEFAULT_DATA_BLOCKS = 6;
-private static final int DEFAULT_PARITY_BLOCKS = 3;
-private static final String DEFAULT_CODEC_NAME = "rs";
-private static final String DEFAULT_SCHEMA_NAME = "RS-6-3";
-private static final ECSchema SYS_DEFAULT_SCHEMA =
-new ECSchema(DEFAULT_SCHEMA_NAME,
-DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
-
-//We may add more later.
-private static ECSchema[] SYS_SCHEMAS = new ECSchema[] {
-SYS_DEFAULT_SCHEMA
-};
-
-/**
-* All active EC activeSchemas maintained in NN memory for fast querying,
-* identified and sorted by its name.
-*/
-private final Map<String, ECSchema> activeSchemas;
-
-ErasureCodingSchemaManager() {
-
-this.activeSchemas = new TreeMap<String, ECSchema>();
-for (ECSchema schema : SYS_SCHEMAS) {
-activeSchemas.put(schema.getSchemaName(), schema);
-}
-
-/**
-* TODO: HDFS-7859 persist into NameNode
-* load persistent schemas from image and editlog, which is done only once
-* during NameNode startup. This can be done here or in a separate method.
-*/
-}
-
-/**
-* Get system defined schemas.
-* @return system schemas
-*/
-public static ECSchema[] getSystemSchemas() {
-return SYS_SCHEMAS;
-}
-
-/**
-* Get system-wide default EC schema, which can be used by default when no
-* schema is specified for an EC zone.
-* @return schema
-*/
-public static ECSchema getSystemDefaultSchema() {
-return SYS_DEFAULT_SCHEMA;
-}
-
-/**
-* Tell the specified schema is the system default one or not.
-* @param schema
-* @return true if it's the default false otherwise
-*/
-public static boolean isSystemDefault(ECSchema schema) {
-if (schema == null) {
-throw new IllegalArgumentException("Invalid schema parameter");
-}
-
-// schema name is the identifier.
-return SYS_DEFAULT_SCHEMA.getSchemaName().equals(schema.getSchemaName());
-}
-
-/**
-* Get all EC schemas that's available to use.
-* @return all EC schemas
-*/
-public ECSchema[] getSchemas() {
-ECSchema[] results = new ECSchema[activeSchemas.size()];
-return activeSchemas.values().toArray(results);
-}
-
-/**
-* Get the EC schema specified by the schema name.
-* @param schemaName
-* @return EC schema specified by the schema name
-*/
-public ECSchema getSchema(String schemaName) {
-return activeSchemas.get(schemaName);
-}
-
-/**
-* Clear and clean up
-*/
-public void clear() {
-activeSchemas.clear();
-}
-}
@@ -23,11 +23,10 @@ import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -60,9 +59,9 @@ public class ErasureCodingZoneManager {
 this.dir = dir;
 }

-ECSchema getErasureCodingSchema(INodesInPath iip) throws IOException {
+ErasureCodingPolicy getErasureCodingPolicy(INodesInPath iip) throws IOException {
 ErasureCodingZone ecZone = getErasureCodingZone(iip);
-return ecZone == null ? null : ecZone.getSchema();
+return ecZone == null ? null : ecZone.getErasureCodingPolicy();
 }

 ErasureCodingZone getErasureCodingZone(INodesInPath iip) throws IOException {
@@ -88,12 +87,11 @@ public class ErasureCodingZoneManager {
 if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
 ByteArrayInputStream bIn=new ByteArrayInputStream(xAttr.getValue());
 DataInputStream dIn=new DataInputStream(bIn);
-int cellSize = WritableUtils.readVInt(dIn);
-String schemaName = WritableUtils.readString(dIn);
-ECSchema schema = dir.getFSNamesystem()
-.getErasureCodingSchemaManager().getSchema(schemaName);
+String ecPolicyName = WritableUtils.readString(dIn);
+ErasureCodingPolicy ecPolicy = dir.getFSNamesystem()
+.getErasureCodingPolicyManager().getPolicy(ecPolicyName);
 return new ErasureCodingZone(dir.getInode(inode.getId())
-.getFullPathName(), schema, cellSize);
+.getFullPathName(), ecPolicy);
 }
 }
 }
@@ -101,7 +99,7 @@ public class ErasureCodingZoneManager {
 }

 List<XAttr> createErasureCodingZone(final INodesInPath srcIIP,
-ECSchema schema, int cellSize) throws IOException {
+ErasureCodingPolicy ecPolicy) throws IOException {
 assert dir.hasWriteLock();
 Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
 String src = srcIIP.getPath();
@@ -115,29 +113,22 @@ public class ErasureCodingZoneManager {
 throw new IOException("Attempt to create an erasure coding zone " +
 "for a file " + src);
 }
-if (getErasureCodingSchema(srcIIP) != null) {
+if (getErasureCodingPolicy(srcIIP) != null) {
 throw new IOException("Directory " + src + " is already in an " +
 "erasure coding zone.");
 }

-// System default schema will be used since no specified.
-if (schema == null) {
-schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+// System default erasure coding policy will be used since no specified.
+if (ecPolicy == null) {
+ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
 }

-if (cellSize <= 0) {
-cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-}
-
-// Write the cellsize first and then schema name
 final XAttr ecXAttr;
 DataOutputStream dOut = null;
 try {
 ByteArrayOutputStream bOut = new ByteArrayOutputStream();
 dOut = new DataOutputStream(bOut);
-WritableUtils.writeVInt(dOut, cellSize);
-// Now persist the schema name in xattr
-WritableUtils.writeString(dOut, schema.getSchemaName());
+WritableUtils.writeString(dOut, ecPolicy.getName());
 ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE,
 bOut.toByteArray());
 } finally {
@@ -158,10 +149,12 @@ public class ErasureCodingZoneManager {
 if (srcZone != null && srcZone.getDir().equals(src) && dstZone == null) {
 return;
 }
-final ECSchema srcSchema = (srcZone != null) ? srcZone.getSchema() : null;
-final ECSchema dstSchema = (dstZone != null) ? dstZone.getSchema() : null;
-if ((srcSchema != null && !srcSchema.equals(dstSchema)) ||
-(dstSchema != null && !dstSchema.equals(srcSchema))) {
+final ErasureCodingPolicy srcECPolicy =
+srcZone != null ? srcZone.getErasureCodingPolicy() : null;
+final ErasureCodingPolicy dstECPolicy =
+dstZone != null ? dstZone.getErasureCodingPolicy() : null;
+if (srcECPolicy != null && !srcECPolicy.equals(dstECPolicy) ||
+dstECPolicy != null && !dstECPolicy.equals(srcECPolicy)) {
 throw new IOException(
 src + " can't be moved because the source and destination have " +
 "different erasure coding policies.");
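
A small sketch of the new zone-xattr round trip (illustrative only; it stands alone rather than reusing the fields above): the xattr value now stores just the policy name, and the full policy is resolved from the ErasureCodingPolicyManager when the zone is read back, instead of persisting a cellSize plus a schema name.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableUtils;

    // Sketch of the serialized xattr payload before and after this change.
    class ZoneXAttrSketch {
      static byte[] write(String policyName) throws IOException {
        ByteArrayOutputStream bOut = new ByteArrayOutputStream();
        DataOutputStream dOut = new DataOutputStream(bOut);
        WritableUtils.writeString(dOut, policyName);  // previously: writeVInt(cellSize) + writeString(schemaName)
        return bOut.toByteArray();
      }

      static String read(byte[] value) throws IOException {
        DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(value));
        return WritableUtils.readString(dIn);         // then resolved via getErasureCodingPolicyManager().getPolicy(name)
      }
    }
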
@@ -22,9 +22,9 @@ import java.util.List;

 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 /**
  * Helper class to perform erasure coding related operations.
@@ -43,15 +43,14 @@ final class FSDirErasureCodingOp {
   * @param fsn namespace
   * @param srcArg the path of a directory which will be the root of the
   * erasure coding zone. The directory must be empty.
-  * @param schema ECSchema for the erasure coding zone
-  * @param cellSize Cell size of stripe
+  * @param ecPolicy erasure coding policy for the erasure coding zone
   * @param logRetryCache whether to record RPC ids in editlog for retry
   * cache rebuilding
   * @return {@link HdfsFileStatus}
   * @throws IOException
   */
  static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn,
-     final String srcArg, final ECSchema schema, final int cellSize,
+     final String srcArg, final ErasureCodingPolicy ecPolicy,
      final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();

@@ -68,7 +67,7 @@ final class FSDirErasureCodingOp {
     try {
       iip = fsd.getINodesInPath4Write(src, false);
       xAttrs = fsn.getErasureCodingZoneManager().createErasureCodingZone(
-          iip, schema, cellSize);
+          iip, ecPolicy);
     } finally {
       fsd.writeUnlock();
     }
@@ -120,7 +119,7 @@ final class FSDirErasureCodingOp {
     assert fsn.hasReadLock();

     final INodesInPath iip = getINodesInPath(fsn, srcArg);
-    return getErasureCodingSchemaForPath(fsn, iip) != null;
+    return getErasureCodingPolicyForPath(fsn, iip) != null;
   }

   /**
@@ -133,49 +132,35 @@ final class FSDirErasureCodingOp {
    */
   static boolean isInErasureCodingZone(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
-    return getErasureCodingSchema(fsn, iip) != null;
+    return getErasureCodingPolicy(fsn, iip) != null;
   }

   /**
-   * Get erasure coding schema.
+   * Get the erasure coding policy.
    *
    * @param fsn namespace
    * @param iip inodes in the path containing the file
-   * @return {@link ECSchema}
+   * @return {@link ErasureCodingPolicy}
    * @throws IOException
    */
-  static ECSchema getErasureCodingSchema(final FSNamesystem fsn,
+  static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
     assert fsn.hasReadLock();

-    return getErasureCodingSchemaForPath(fsn, iip);
+    return getErasureCodingPolicyForPath(fsn, iip);
   }

   /**
-   * Get available erasure coding schemas.
+   * Get available erasure coding polices.
    *
    * @param fsn namespace
-   * @return {@link ECSchema} array
+   * @return {@link ErasureCodingPolicy} array
    */
-  static ECSchema[] getErasureCodingSchemas(final FSNamesystem fsn)
+  static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();

-    return fsn.getErasureCodingSchemaManager().getSchemas();
-  }
-
-  /**
-   * Get the ECSchema specified by the name.
-   *
-   * @param fsn namespace
-   * @param schemaName schema name
-   * @return {@link ECSchema}
-   */
-  static ECSchema getErasureCodingSchema(final FSNamesystem fsn,
-      final String schemaName) throws IOException {
-    assert fsn.hasReadLock();
-
-    return fsn.getErasureCodingSchemaManager().getSchema(schemaName);
+    return fsn.getErasureCodingPolicyManager().getPolicies();
   }

   private static INodesInPath getINodesInPath(final FSNamesystem fsn,
@@ -204,12 +189,12 @@ final class FSDirErasureCodingOp {
     }
   }

-  private static ECSchema getErasureCodingSchemaForPath(final FSNamesystem fsn,
+  private static ErasureCodingPolicy getErasureCodingPolicyForPath(final FSNamesystem fsn,
       final INodesInPath iip) throws IOException {
     final FSDirectory fsd = fsn.getFSDirectory();
     fsd.readLock();
     try {
-      return fsn.getErasureCodingZoneManager().getErasureCodingSchema(iip);
+      return fsn.getErasureCodingZoneManager().getErasureCodingPolicy(iip);
     } finally {
       fsd.readUnlock();
     }
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -40,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -318,7 +318,7 @@ class FSDirStatAndListingOp {
     if (fsd.getINode4DotSnapshot(srcs) != null) {
       return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null, 0);
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
     }
     return null;
   }
@@ -388,8 +388,8 @@ class FSDirStatAndListingOp {

     final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
         fsd.getFSNamesystem(), iip);
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy =
+        ecZone != null ? ecZone.getErasureCodingPolicy() : null;

     if (node.isFile()) {
       final INodeFile fileNode = node.asFile();
@@ -421,8 +421,7 @@ class FSDirStatAndListingOp {
         childrenNum,
         feInfo,
         storagePolicy,
-        schema,
-        cellSize);
+        ecPolicy);
   }

   private static INodeAttributes getINodeAttributes(
@@ -471,8 +470,8 @@ class FSDirStatAndListingOp {
     }
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
-    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
-    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
+    final ErasureCodingPolicy ecPolicy =
+        ecZone != null ? ecZone.getErasureCodingPolicy() : null;

     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
@@ -481,8 +480,7 @@ class FSDirStatAndListingOp {
         getPermissionForFileStatus(nodeAttrs, isEncrypted),
         nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-        node.getId(), loc, childrenNum, feInfo, storagePolicy, schema,
-        cellSize);
+        node.getId(), loc, childrenNum, feInfo, storagePolicy, ecPolicy);
     // Set caching information for the located blocks.
     if (loc != null) {
       CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -51,7 +52,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -532,16 +532,15 @@ class FSDirWriteFileOp {
     if (isStriped) {
       ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsd.getFSNamesystem(), inodesInPath);
-      ECSchema ecSchema = ecZone.getSchema();
-      short numDataUnits = (short) ecSchema.getNumDataUnits();
-      short numParityUnits = (short) ecSchema.getNumParityUnits();
+      ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
+      short numDataUnits = (short) ecPolicy.getNumDataUnits();
+      short numParityUnits = (short) ecPolicy.getNumParityUnits();
       short numLocations = (short) (numDataUnits + numParityUnits);

       // check quota limits and updated space consumed
       fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
           numLocations, true);
-      blockInfo = new BlockInfoUnderConstructionStriped(block, ecSchema,
-          ecZone.getCellSize(),
+      blockInfo = new BlockInfoUnderConstructionStriped(block, ecPolicy,
           HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
     } else {
       // check quota limits and updated space consumed
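Note (not part of the commit): the quota path above charges one preferredBlockSize per internal block of the striped group, with the data and parity counts now read from the zone's ErasureCodingPolicy. A small illustrative calculation, assuming a 6 data + 3 parity layout and a 128 MB preferred block size (assumed figures, not values taken from this diff):

public class StripedQuotaExample {
  public static void main(String[] args) {
    final short numDataUnits = 6;    // assumed, e.g. a 6+3 layout
    final short numParityUnits = 3;  // assumed
    final short numLocations = (short) (numDataUnits + numParityUnits); // 9

    // Each new block group reserves preferredBlockSize bytes on each of the
    // 9 internal blocks, so quota is charged for 9 x preferredBlockSize.
    final long preferredBlockSize = 128L * 1024 * 1024; // assumed 128 MB
    final long reserved = preferredBlockSize * numLocations; // 1,207,959,552 bytes
    System.out.println("reserved = " + reserved + " bytes");
  }
}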
@@ -992,7 +992,7 @@ public class FSEditLogLoader {
       boolean isStriped = ecZone != null;
       if (isStriped) {
         newBlockInfo = new BlockInfoUnderConstructionStriped(newBlock,
-            ecZone.getSchema(), ecZone.getCellSize());
+            ecZone.getErasureCodingPolicy());
       } else {
         newBlockInfo = new BlockInfoUnderConstructionContiguous(newBlock,
             file.getPreferredBlockReplication());
@@ -1078,7 +1078,7 @@ public class FSEditLogLoader {
       // until several blocks in?
       if (isStriped) {
         newBI = new BlockInfoUnderConstructionStriped(newBlock,
-            ecZone.getSchema(), ecZone.getCellSize());
+            ecZone.getErasureCodingPolicy());
       } else {
         newBI = new BlockInfoUnderConstructionContiguous(newBlock,
             file.getPreferredBlockReplication());
@@ -1088,11 +1088,9 @@ public class FSEditLogLoader {
       // is only executed when loading edits written by prior
       // versions of Hadoop. Current versions always log
       // OP_ADD operations as each block is allocated.
-      // TODO: ECSchema can be restored from persisted file (HDFS-7859).
       if (isStriped) {
         newBI = new BlockInfoStriped(newBlock,
-            ErasureCodingSchemaManager.getSystemDefaultSchema(),
-            ecZone.getCellSize());
+            ErasureCodingPolicyManager.getSystemDefaultPolicy());
       } else {
         newBI = new BlockInfoContiguous(newBlock,
             file.getPreferredBlockReplication());
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
@@ -66,7 +67,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.protobuf.ByteString;
-import org.apache.hadoop.io.erasurecode.ECSchema;

 @InterfaceAudience.Private
 public final class FSImageFormatPBINode {
@@ -327,17 +327,13 @@ public final class FSImageFormatPBINode {
       short replication = (short) f.getReplication();
       boolean isStriped = f.getIsStriped();
       LoaderContext state = parent.getLoaderContext();
-      ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+      ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();

-      if (isStriped) {
-        Preconditions.checkState(f.hasStripingCellSize());
-      }
       BlockInfo[] blocks = new BlockInfo[bp.size()];
       for (int i = 0; i < bp.size(); ++i) {
         BlockProto b = bp.get(i);
         if (isStriped) {
-          blocks[i] = new BlockInfoStriped(PBHelper.convert(b), schema,
-              (int)f.getStripingCellSize());
+          blocks[i] = new BlockInfoStriped(PBHelper.convert(b), ecPolicy);
         } else {
           blocks[i] = new BlockInfoContiguous(PBHelper.convert(b),
               replication);
@@ -373,8 +369,7 @@ public final class FSImageFormatPBINode {
       final BlockInfo ucBlk;
       if (isStriped) {
         BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
-        ucBlk = new BlockInfoUnderConstructionStriped(striped,
-            schema, (int)f.getStripingCellSize());
+        ucBlk = new BlockInfoUnderConstructionStriped(striped, ecPolicy);
       } else {
         ucBlk = new BlockInfoUnderConstructionContiguous(lastBlk,
             replication);
@@ -656,16 +651,6 @@ public final class FSImageFormatPBINode {
         }
       }

-      if (n.isStriped()) {
-        if (blocks != null && blocks.length > 0) {
-          BlockInfo firstBlock = blocks[0];
-          Preconditions.checkState(firstBlock.isStriped());
-          b.setStripingCellSize(((BlockInfoStriped)firstBlock).getCellSize());
-        } else {
-          b.setStripingCellSize(HdfsConstants.BLOCK_STRIPED_CELL_SIZE);
-        }
-      }
-
      FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
      if (uc != null) {
        INodeSection.FileUnderConstructionFeature f =
@@ -180,6 +180,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -262,7 +263,6 @@ import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetryCache;
 import org.apache.hadoop.ipc.Server;
@@ -426,7 +426,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final BlockManager blockManager;
   private final SnapshotManager snapshotManager;
   private final CacheManager cacheManager;
-  private final ErasureCodingSchemaManager ecSchemaManager;
+  private final ErasureCodingPolicyManager ecPolicyManager;
   private final DatanodeStatistics datanodeStatistics;

   private String nameserviceId;
@@ -606,7 +606,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       leaseManager.removeAllLeases();
       snapshotManager.clearSnapshottableDirs();
       cacheManager.clear();
-      ecSchemaManager.clear();
+      ecPolicyManager.clear();
       setImageLoaded(false);
       blockManager.clear();
     }
@@ -846,7 +846,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       this.dir = new FSDirectory(this, conf);
       this.snapshotManager = new SnapshotManager(dir);
       this.cacheManager = new CacheManager(this, conf, blockManager);
-      this.ecSchemaManager = new ErasureCodingSchemaManager();
+      this.ecPolicyManager = new ErasureCodingPolicyManager();
       this.safeMode = new SafeModeInfo(conf);
       this.topConf = new TopConf(conf);
       this.auditLoggers = initAuditLoggers(conf);
@@ -3679,16 +3679,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (fileINode.isStriped()) {
       final ErasureCodingZone ecZone = FSDirErasureCodingOp
           .getErasureCodingZone(this, iip);
-      final ECSchema ecSchema = ecZone.getSchema();
-      final short numDataUnits = (short) ecSchema.getNumDataUnits();
-      final short numParityUnits = (short) ecSchema.getNumParityUnits();
+      final ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
+      final short numDataUnits = (short) ecPolicy.getNumDataUnits();
+      final short numParityUnits = (short) ecPolicy.getNumParityUnits();

       final long numBlocks = numDataUnits + numParityUnits;
       final long fullBlockGroupSize =
           fileINode.getPreferredBlockSize() * numBlocks;

       final BlockInfoStriped striped = new BlockInfoStriped(commitBlock,
-          ecSchema, ecZone.getCellSize());
+          ecPolicy);
       final long actualBlockGroupSize = striped.spaceConsumed();

       diff = fullBlockGroupSize - actualBlockGroupSize;
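Note (not part of the commit): the FSNamesystem hunk above reserves the full block group size up front and releases the unused part once the committed group's actual consumption is known. A sketch of that arithmetic under the same assumed 6 data + 3 parity layout and 128 MB preferred block size; the "actual" figure is hypothetical, since it really comes from BlockInfoStriped.spaceConsumed():

public class BlockGroupSizeExample {
  public static void main(String[] args) {
    final long preferredBlockSize = 128L * 1024 * 1024; // assumed 128 MB
    final short numDataUnits = 6;    // assumed
    final short numParityUnits = 3;  // assumed
    final long numBlocks = numDataUnits + numParityUnits;

    // Upper bound charged while the block group is under construction.
    final long fullBlockGroupSize = preferredBlockSize * numBlocks;

    // Suppose the committed group actually consumed far less (a small file);
    // the difference is released back against the quota on commit.
    final long actualBlockGroupSize = 10L * 1024 * 1024; // hypothetical
    final long diff = fullBlockGroupSize - actualBlockGroupSize;
    System.out.println("released bytes = " + diff);
  }
}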
@@ -6676,9 +6676,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return cacheManager;
   }

-  /** @return the ErasureCodingSchemaManager. */
-  public ErasureCodingSchemaManager getErasureCodingSchemaManager() {
-    return ecSchemaManager;
+  /** @return the ErasureCodingPolicyManager. */
+  public ErasureCodingPolicyManager getErasureCodingPolicyManager() {
+    return ecPolicyManager;
   }

   /** @return the ErasureCodingZoneManager. */
@@ -7581,14 +7581,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   * Create an erasure coding zone on directory src.
   * @param srcArg the path of a directory which will be the root of the
   * erasure coding zone. The directory must be empty.
-  * @param schema ECSchema for the erasure coding zone
-  * @param cellSize Cell size of stripe
+  * @param ecPolicy erasure coding policy for the erasure coding zone
   * @throws AccessControlException if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   * @throws SafeModeException if the Namenode is in safe mode.
   */
-  void createErasureCodingZone(final String srcArg, final ECSchema schema,
-      int cellSize, final boolean logRetryCache) throws IOException,
+  void createErasureCodingZone(final String srcArg, final ErasureCodingPolicy
+      ecPolicy, final boolean logRetryCache) throws IOException,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);
@@ -7599,7 +7598,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create erasure coding zone on " + srcArg);
       resultingStat = FSDirErasureCodingOp.createErasureCodingZone(this,
-          srcArg, schema, cellSize, logRetryCache);
+          srcArg, ecPolicy, logRetryCache);
       success = true;
     } finally {
       writeUnlock();
@@ -7627,30 +7626,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }

   /**
-   * Get available erasure coding schemas
+   * Get available erasure coding polices
    */
-  ECSchema[] getErasureCodingSchemas() throws IOException {
+  ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return FSDirErasureCodingOp.getErasureCodingSchemas(this);
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Get the ECSchema specified by the name
-   */
-  ECSchema getErasureCodingSchema(String schemaName) throws IOException {
-    checkOperation(OperationCategory.READ);
-    waitForLoadingFSImage();
-    readLock();
-    try {
-      checkOperation(OperationCategory.READ);
-      return FSDirErasureCodingOp.getErasureCodingSchema(this, schemaName);
+      return FSDirErasureCodingOp.getErasureCodingPolicies(this);
     } finally {
       readUnlock();
     }
@@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -143,7 +144,6 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RetryCache;
@@ -1823,7 +1823,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override // ClientProtocol
-  public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
+  public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
       throws IOException {
     checkNNStartup();
     final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -1832,8 +1832,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     }
     boolean success = false;
     try {
-      namesystem.createErasureCodingZone(src, schema, cellSize,
-          cacheEntry != null);
+      namesystem.createErasureCodingZone(src, ecPolicy, cacheEntry != null);
       success = true;
     } finally {
       RetryCache.setState(cacheEntry, success);
@@ -2035,9 +2034,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override // ClientProtocol
-  public ECSchema[] getECSchemas() throws IOException {
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     checkNNStartup();
-    return namesystem.getErasureCodingSchemas();
+    return namesystem.getErasureCodingPolicies();
   }

   @Override // ClientProtocol
@@ -572,7 +572,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {

     // count expected replicas
     short targetFileReplication;
-    if (file.getECSchema() != null) {
+    if (file.getErasureCodingPolicy() != null) {
       assert storedBlock instanceof BlockInfoStriped;
       targetFileReplication = ((BlockInfoStriped) storedBlock)
           .getRealTotalBlockNum();
@@ -1158,11 +1158,11 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {

   @VisibleForTesting
   static class ErasureCodingResult extends Result {
-    final String defaultSchema;
+    final String defaultECPolicy;

     ErasureCodingResult(Configuration conf) {
-      defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema()
-          .getSchemaName();
+      defaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy()
+          .getName();
     }

     @Override
@@ -1239,7 +1239,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
           ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks))
           .append(" %)");
     }
-    res.append("\n Default schema:\t\t").append(defaultSchema)
+    res.append("\n Default ecPolicy:\t\t").append(defaultECPolicy)
         .append("\n Average block group size:\t").append(
         getReplicationFactor()).append("\n Missing block groups:\t\t").append(
         missingIds.size()).append("\n Corrupt block groups:\t\t").append(
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

 import java.util.Arrays;
 import java.util.Collection;
@@ -77,31 +77,28 @@ public class BlockECRecoveryCommand extends DatanodeCommand {
     private String[] targetStorageIDs;
     private StorageType[] targetStorageTypes;
     private final short[] liveBlockIndices;
-    private final ECSchema ecSchema;
-    private final int cellSize;
+    private final ErasureCodingPolicy ecPolicy;

     public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
         DatanodeStorageInfo[] targetDnStorageInfo, short[] liveBlockIndices,
-        ECSchema ecSchema, int cellSize) {
+        ErasureCodingPolicy ecPolicy) {
       this(block, sources, DatanodeStorageInfo
           .toDatanodeInfos(targetDnStorageInfo), DatanodeStorageInfo
           .toStorageIDs(targetDnStorageInfo), DatanodeStorageInfo
-          .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecSchema,
-          cellSize);
+          .toStorageTypes(targetDnStorageInfo), liveBlockIndices, ecPolicy);
     }

     public BlockECRecoveryInfo(ExtendedBlock block, DatanodeInfo[] sources,
         DatanodeInfo[] targets, String[] targetStorageIDs,
         StorageType[] targetStorageTypes, short[] liveBlockIndices,
-        ECSchema ecSchema, int cellSize) {
+        ErasureCodingPolicy ecPolicy) {
       this.block = block;
       this.sources = sources;
       this.targets = targets;
       this.targetStorageIDs = targetStorageIDs;
       this.targetStorageTypes = targetStorageTypes;
       this.liveBlockIndices = liveBlockIndices;
-      this.ecSchema = ecSchema;
-      this.cellSize = cellSize;
+      this.ecPolicy = ecPolicy;
     }

     public ExtendedBlock getExtendedBlock() {
@@ -128,12 +125,8 @@ public class BlockECRecoveryCommand extends DatanodeCommand {
       return liveBlockIndices;
     }

-    public ECSchema getECSchema() {
-      return ecSchema;
-    }
-
-    public int getCellSize() {
-      return cellSize;
+    public ErasureCodingPolicy getErasureCodingPolicy() {
+      return ecPolicy;
     }

     @Override
@@ -31,9 +31,8 @@ import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.util.StringUtils;

 /**
@@ -49,7 +48,7 @@ public abstract class ECCommand extends Command {
     factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
     factory.addClass(GetECZoneCommand.class, "-"
         + GetECZoneCommand.NAME);
-    factory.addClass(ListECSchemas.class, "-" + ListECSchemas.NAME);
+    factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME);
   }

   @Override
@@ -77,35 +76,24 @@ public abstract class ECCommand extends Command {
   }

   /**
-   * Create EC encoding zone command. Zones are created to use specific EC
-   * encoding schema, other than default while encoding the files under some
-   * specific directory.
+   * A command to create an EC zone for a path, with a erasure coding policy name.
    */
   static class CreateECZoneCommand extends ECCommand {
     public static final String NAME = "createZone";
-    public static final String USAGE = "[-s <schemaName>] [-c <cellSize>] <path>";
+    public static final String USAGE = "[-s <policyName>] <path>";
     public static final String DESCRIPTION =
-        "Create a zone to encode files using a specified schema\n"
+        "Create a zone to encode files using a specified policy\n"
         + "Options :\n"
-        + " -s <schemaName> : EC schema name to encode files. "
-        + "If not passed default schema will be used\n"
-        + " -c <cellSize> : cell size to use for striped encoding files."
-        + " If not passed default cellsize of "
-        + HdfsConstants.BLOCK_STRIPED_CELL_SIZE + " will be used\n"
+        + " -s <policyName> : erasure coding policy name to encode files. "
+        + "If not passed the default policy will be used\n"
        + " <path> : Path to an empty directory. Under this directory "
-        + "files will be encoded using specified schema";
-    private String schemaName;
-    private int cellSize = 0;
-    private ECSchema schema = null;
+        + "files will be encoded using specified erasure coding policy";
+    private String ecPolicyName;
+    private ErasureCodingPolicy ecPolicy = null;

     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      schemaName = StringUtils.popOptionWithArgument("-s", args);
-      String cellSizeStr = StringUtils.popOptionWithArgument("-c", args);
-      if (cellSizeStr != null) {
-        cellSize = (int) StringUtils.TraditionalBinaryPrefix
-            .string2long(cellSizeStr);
-      }
+      ecPolicyName = StringUtils.popOptionWithArgument("-s", args);
       if (args.isEmpty()) {
         throw new HadoopIllegalArgumentException("<path> is missing");
       }
@@ -119,29 +107,29 @@ public abstract class ECCommand extends Command {
       super.processPath(item);
       DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
       try {
-        if (schemaName != null) {
-          ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
-          for (ECSchema ecSchema : ecSchemas) {
-            if (schemaName.equals(ecSchema.getSchemaName())) {
-              schema = ecSchema;
+        if (ecPolicyName != null) {
+          ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
+          for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+            if (ecPolicyName.equals(ecPolicy.getName())) {
+              this.ecPolicy = ecPolicy;
               break;
             }
           }
-          if (schema == null) {
+          if (ecPolicy == null) {
             StringBuilder sb = new StringBuilder();
-            sb.append("Schema '");
-            sb.append(schemaName);
-            sb.append("' does not match any of the supported schemas.");
+            sb.append("Policy '");
+            sb.append(ecPolicyName);
+            sb.append("' does not match any of the supported policies.");
             sb.append(" Please select any one of ");
-            List<String> schemaNames = new ArrayList<String>();
-            for (ECSchema ecSchema : ecSchemas) {
-              schemaNames.add(ecSchema.getSchemaName());
+            List<String> ecPolicyNames = new ArrayList<String>();
+            for (ErasureCodingPolicy ecPolicy : ecPolicies) {
+              ecPolicyNames.add(ecPolicy.getName());
             }
-            sb.append(schemaNames);
+            sb.append(ecPolicyNames);
             throw new HadoopIllegalArgumentException(sb.toString());
           }
         }
-        dfs.createErasureCodingZone(item.path, schema, cellSize);
+        dfs.createErasureCodingZone(item.path, ecPolicy);
         out.println("EC Zone created successfully at " + item.path);
       } catch (IOException e) {
         throw new IOException("Unable to create EC zone for the path "
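Note (not part of the commit): the createZone command above resolves a policy by name via the client and then calls DistributedFileSystem#createErasureCodingZone. A rough programmatic equivalent is sketched below; the NameNode URI, target path, and policy name are made up, and passing a null policy lets the NameNode fall back to the system default as shown in the ErasureCodingZoneManager hunk earlier:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CreateZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
        .get(URI.create("hdfs://nn.example.com:8020"), conf); // hypothetical NameNode

    // Look up a policy by name, the same way the shell command does.
    String requestedPolicy = "somePolicyName"; // hypothetical policy name
    ErasureCodingPolicy chosen = null;
    for (ErasureCodingPolicy p : dfs.getClient().getErasureCodingPolicies()) {
      if (p.getName().equals(requestedPolicy)) {
        chosen = p;
        break;
      }
    }

    // The target directory must be empty; null means "use the default policy".
    dfs.createErasureCodingZone(new Path("/ec-data"), chosen);
  }
}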
@@ -188,13 +176,13 @@ public abstract class ECCommand extends Command {
   }

   /**
-   * List all supported EC Schemas
+   * List all supported erasure coding policies
    */
-  static class ListECSchemas extends ECCommand {
-    public static final String NAME = "listSchemas";
+  static class ListPolicies extends ECCommand {
+    public static final String NAME = "listPolicies";
     public static final String USAGE = "";
     public static final String DESCRIPTION =
-        "Get the list of ECSchemas supported\n";
+        "Get the list of erasure coding policies supported\n";

     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -209,14 +197,14 @@ public abstract class ECCommand extends Command {
       }
       DistributedFileSystem dfs = (DistributedFileSystem) fs;

-      ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
+      ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
       StringBuilder sb = new StringBuilder();
       int i = 0;
-      while (i < ecSchemas.length) {
-        ECSchema ecSchema = ecSchemas[i];
-        sb.append(ecSchema.getSchemaName());
+      while (i < ecPolicies.length) {
+        ErasureCodingPolicy ecPolicy = ecPolicies[i];
+        sb.append(ecPolicy.getName());
         i++;
-        if (i < ecSchemas.length) {
+        if (i < ecPolicies.length) {
           sb.append(", ");
         }
       }
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.security.token.Token;

@@ -318,7 +318,7 @@ public class StripedBlockUtil {
   }

   /**
-   * Decode based on the given input buffers and schema.
+   * Decode based on the given input buffers and erasure coding policy.
    */
   public static void decodeAndFillBuffer(final byte[][] decodeInputs,
       AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,
@@ -355,20 +355,20 @@ public class StripedBlockUtil {
   * by stateful read and uses ByteBuffer as reading target buffer. Besides the
   * read range is within a single stripe thus the calculation logic is simpler.
   */
-  public static AlignedStripe[] divideOneStripe(ECSchema ecSchema,
+  public static AlignedStripe[] divideOneStripe(ErasureCodingPolicy ecPolicy,
      int cellSize, LocatedStripedBlock blockGroup, long rangeStartInBlockGroup,
      long rangeEndInBlockGroup, ByteBuffer buf) {
-    final int dataBlkNum = ecSchema.getNumDataUnits();
+    final int dataBlkNum = ecPolicy.getNumDataUnits();
     // Step 1: map the byte range to StripingCells
-    StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize,
+    StripingCell[] cells = getStripingCellsOfByteRange(ecPolicy, cellSize,
         blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup);

     // Step 2: get the unmerged ranges on each internal block
-    VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize,
+    VerticalRange[] ranges = getRangesForInternalBlocks(ecPolicy, cellSize,
         cells);

     // Step 3: merge into stripes
-    AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges);
+    AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);

     // Step 4: calculate each chunk's position in destination buffer. Since the
     // whole read range is within a single stripe, the logic is simpler here.
@@ -400,7 +400,7 @@ public class StripedBlockUtil {
   /**
    * This method divides a requested byte range into an array of inclusive
    * {@link AlignedStripe}.
-   * @param ecSchema The codec schema for the file, which carries the numbers
+   * @param ecPolicy The codec policy for the file, which carries the numbers
    * of data / parity blocks
    * @param cellSize Cell size of stripe
    * @param blockGroup The striped block group
@@ -412,24 +412,24 @@ public class StripedBlockUtil {
   * At most 5 stripes will be generated from each logical range, as
   * demonstrated in the header of {@link AlignedStripe}.
   */
-  public static AlignedStripe[] divideByteRangeIntoStripes(ECSchema ecSchema,
+  public static AlignedStripe[] divideByteRangeIntoStripes(ErasureCodingPolicy ecPolicy,
      int cellSize, LocatedStripedBlock blockGroup,
      long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf,
      int offsetInBuf) {

    // Step 0: analyze range and calculate basic parameters
-   final int dataBlkNum = ecSchema.getNumDataUnits();
+   final int dataBlkNum = ecPolicy.getNumDataUnits();

    // Step 1: map the byte range to StripingCells
-   StripingCell[] cells = getStripingCellsOfByteRange(ecSchema, cellSize,
+   StripingCell[] cells = getStripingCellsOfByteRange(ecPolicy, cellSize,
        blockGroup, rangeStartInBlockGroup, rangeEndInBlockGroup);

    // Step 2: get the unmerged ranges on each internal block
-   VerticalRange[] ranges = getRangesForInternalBlocks(ecSchema, cellSize,
+   VerticalRange[] ranges = getRangesForInternalBlocks(ecPolicy, cellSize,
        cells);

    // Step 3: merge into at most 5 stripes
-   AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecSchema, ranges);
+   AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);

    // Step 4: calculate each chunk's position in destination buffer
    calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf, offsetInBuf);
@@ -446,7 +446,7 @@ public class StripedBlockUtil {
   * used by {@link DFSStripedOutputStream} in encoding
   */
  @VisibleForTesting
- private static StripingCell[] getStripingCellsOfByteRange(ECSchema ecSchema,
+ private static StripingCell[] getStripingCellsOfByteRange(ErasureCodingPolicy ecPolicy,
      int cellSize, LocatedStripedBlock blockGroup,
      long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
    Preconditions.checkArgument(
@@ -461,16 +461,16 @@ public class StripedBlockUtil {
    final int firstCellOffset = (int) (rangeStartInBlockGroup % cellSize);
    final int firstCellSize =
        (int) Math.min(cellSize - (rangeStartInBlockGroup % cellSize), len);
-   cells[0] = new StripingCell(ecSchema, firstCellSize, firstCellIdxInBG,
+   cells[0] = new StripingCell(ecPolicy, firstCellSize, firstCellIdxInBG,
        firstCellOffset);
    if (lastCellIdxInBG != firstCellIdxInBG) {
      final int lastCellSize = (int) (rangeEndInBlockGroup % cellSize) + 1;
-     cells[numCells - 1] = new StripingCell(ecSchema, lastCellSize,
+     cells[numCells - 1] = new StripingCell(ecPolicy, lastCellSize,
          lastCellIdxInBG, 0);
    }

    for (int i = 1; i < numCells - 1; i++) {
-     cells[i] = new StripingCell(ecSchema, cellSize, i + firstCellIdxInBG, 0);
+     cells[i] = new StripingCell(ecPolicy, cellSize, i + firstCellIdxInBG, 0);
    }

    return cells;
@@ -481,10 +481,10 @@ public class StripedBlockUtil {
   * the physical byte range (inclusive) on each stored internal block.
   */
  @VisibleForTesting
- private static VerticalRange[] getRangesForInternalBlocks(ECSchema ecSchema,
+ private static VerticalRange[] getRangesForInternalBlocks(ErasureCodingPolicy ecPolicy,
      int cellSize, StripingCell[] cells) {
-   int dataBlkNum = ecSchema.getNumDataUnits();
-   int parityBlkNum = ecSchema.getNumParityUnits();
+   int dataBlkNum = ecPolicy.getNumDataUnits();
+   int parityBlkNum = ecPolicy.getNumParityUnits();

    VerticalRange ranges[] = new VerticalRange[dataBlkNum + parityBlkNum];

@@ -521,9 +521,9 @@ public class StripedBlockUtil {
   * {@link AlignedStripe} instances.
   */
  private static AlignedStripe[] mergeRangesForInternalBlocks(
-     ECSchema ecSchema, VerticalRange[] ranges) {
-   int dataBlkNum = ecSchema.getNumDataUnits();
-   int parityBlkNum = ecSchema.getNumParityUnits();
+     ErasureCodingPolicy ecPolicy, VerticalRange[] ranges) {
+   int dataBlkNum = ecPolicy.getNumDataUnits();
+   int parityBlkNum = ecPolicy.getNumParityUnits();
    List<AlignedStripe> stripes = new ArrayList<>();
    SortedSet<Long> stripePoints = new TreeSet<>();
    for (VerticalRange r : ranges) {
@@ -628,7 +628,7 @@ public class StripedBlockUtil {
   */
  @VisibleForTesting
  static class StripingCell {
-   final ECSchema schema;
+   final ErasureCodingPolicy ecPolicy;
    /** Logical order in a block group, used when doing I/O to a block group */
    final int idxInBlkGroup;
    final int idxInInternalBlk;
@@ -642,13 +642,13 @@ public class StripedBlockUtil {
    final int offset;
    final int size;

-   StripingCell(ECSchema ecSchema, int cellSize, int idxInBlkGroup,
+   StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, int idxInBlkGroup,
        int offset) {
-     this.schema = ecSchema;
+     this.ecPolicy = ecPolicy;
      this.idxInBlkGroup = idxInBlkGroup;
-     this.idxInInternalBlk = idxInBlkGroup / ecSchema.getNumDataUnits();
+     this.idxInInternalBlk = idxInBlkGroup / ecPolicy.getNumDataUnits();
      this.idxInStripe = idxInBlkGroup -
-         this.idxInInternalBlk * ecSchema.getNumDataUnits();
+         this.idxInInternalBlk * ecPolicy.getNumDataUnits();
      this.offset = offset;
      this.size = cellSize;
    }
@@ -863,8 +863,8 @@ service ClientNamenodeProtocol {
returns(GetCurrentEditLogTxidResponseProto);
rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
returns(GetEditsFromTxidResponseProto);
-rpc getECSchemas(GetECSchemasRequestProto)
-returns(GetECSchemasResponseProto);
+rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto)
+returns(GetErasureCodingPoliciesResponseProto);
rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
returns(GetErasureCodingZoneResponseProto);
}
@@ -28,24 +28,22 @@ import "hdfs.proto";
*/
message ErasureCodingZoneProto {
required string dir = 1;
-required ECSchemaProto schema = 2;
-required uint32 cellSize = 3;
+required ErasureCodingPolicyProto ecPolicy = 2;
}

message CreateErasureCodingZoneRequestProto {
required string src = 1;
-optional ECSchemaProto schema = 2;
-optional uint32 cellSize = 3;
+optional ErasureCodingPolicyProto ecPolicy = 2;
}

message CreateErasureCodingZoneResponseProto {
}

-message GetECSchemasRequestProto { // void request
+message GetErasureCodingPoliciesRequestProto { // void request
}

-message GetECSchemasResponseProto {
-repeated ECSchemaProto schemas = 1;
+message GetErasureCodingPoliciesResponseProto {
+repeated ErasureCodingPolicyProto ecPolicies = 1;
}

message GetErasureCodingZoneRequestProto {

@@ -66,6 +64,5 @@ message BlockECRecoveryInfoProto {
required StorageUuidsProto targetStorageUuids = 4;
required StorageTypesProto targetStorageTypes = 5;
repeated uint32 liveBlockIndices = 6;
-required ECSchemaProto ecSchema = 7;
-required uint32 cellSize = 8;
+required ErasureCodingPolicyProto ecPolicy = 7;
}
@@ -141,7 +141,6 @@ message INodeSection {
optional XAttrFeatureProto xAttrs = 9;
optional uint32 storagePolicyID = 10;
optional bool isStriped = 11;
-optional uint64 stripingCellSize = 12;
}

message QuotaByStorageTypeEntryProto {
@@ -306,8 +306,7 @@ message LocatedBlocksProto {
optional FileEncryptionInfoProto fileEncryptionInfo = 6;

// Optional field for erasure coding
-optional ECSchemaProto eCSchema = 7;
-optional uint32 stripeCellSize = 8;
+optional ErasureCodingPolicyProto ecPolicy = 7;
}

/**

@@ -322,11 +321,16 @@ message ECSchemaOptionEntryProto {
* ECSchema for erasurecoding
*/
message ECSchemaProto {
-required string schemaName = 1;
-required string codecName = 2;
-required uint32 dataUnits = 3;
-required uint32 parityUnits = 4;
-repeated ECSchemaOptionEntryProto options = 5;
+required string codecName = 1;
+required uint32 dataUnits = 2;
+required uint32 parityUnits = 3;
+repeated ECSchemaOptionEntryProto options = 4;
+}
+
+message ErasureCodingPolicyProto {
+required string name = 1;
+required ECSchemaProto schema = 2;
+required uint32 cellSize = 3;
}

/**

@@ -365,8 +369,7 @@ message HdfsFileStatusProto {
optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id

// Optional field for erasure coding
-optional ECSchemaProto ecSchema = 17;
-optional uint32 stripeCellSize = 18;
+optional ErasureCodingPolicyProto ecPolicy = 17;
}

/**
@@ -1,3 +1,4 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file

@@ -1875,7 +1875,7 @@ public class DFSTestUtil {
assert dir != null;
dfs.mkdirs(dir);
try {
-dfs.getClient().createErasureCodingZone(dir.toString(), null, 0);
+dfs.getClient().createErasureCodingZone(dir.toString(), null);
} catch (IOException e) {
if (!e.getMessage().contains("non-empty directory")) {
throw e;
@@ -255,12 +255,12 @@ public class TestDFSClientRetries {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
-1010, 0, null, (byte) 0, null, 0)).when(mockNN).getFileInfo(anyString());
+1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());

Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
-1010, 0, null, (byte) 0, null, 0))
+1010, 0, null, (byte) 0, null))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),

@@ -496,7 +496,7 @@ public class TestDFSClientRetries {
badBlocks.add(badLocatedBlock);
return new LocatedBlocks(goodBlockList.getFileLength(), false,
badBlocks, null, true,
-null, null, 0);
+null, null);
}
}
@@ -35,10 +35,10 @@ import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.junit.After;
import org.junit.Assert;

@@ -59,7 +59,7 @@ public class TestDFSStripedInputStream {
private DistributedFileSystem fs;
private final Path dirPath = new Path("/striped");
private Path filePath = new Path(dirPath, "file");
-private final ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
+private final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final short DATA_BLK_NUM = HdfsConstants.NUM_DATA_BLOCKS;
private final short PARITY_BLK_NUM = HdfsConstants.NUM_PARITY_BLOCKS;
private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

@@ -79,7 +79,7 @@ public class TestDFSStripedInputStream {
}
fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
-fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
+fs.getClient().createErasureCodingZone(dirPath.toString(), null);
}

@After

@@ -100,7 +100,7 @@ public class TestDFSStripedInputStream {
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
-filePath.toString(), false, schema, CELLSIZE, null);
+filePath.toString(), false, ecPolicy, null);

List<LocatedBlock> lbList = lbs.getLocatedBlocks();
for (LocatedBlock aLbList : lbList) {

@@ -152,7 +152,7 @@ public class TestDFSStripedInputStream {
}
}
DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
-filePath.toString(), false, schema, CELLSIZE, null);
+filePath.toString(), false, ecPolicy, null);

int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,

@@ -194,7 +194,7 @@ public class TestDFSStripedInputStream {
}
DFSStripedInputStream in =
new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
-ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE, null);
+ErasureCodingPolicyManager.getSystemDefaultPolicy(), null);
int readSize = BLOCK_GROUP_SIZE;
byte[] readBuffer = new byte[readSize];
byte[] expected = new byte[readSize];

@@ -292,7 +292,7 @@ public class TestDFSStripedInputStream {

DFSStripedInputStream in =
new DFSStripedInputStream(fs.getClient(), filePath.toString(),
-false, schema, CELLSIZE, null);
+false, ecPolicy, null);

byte[] expected = new byte[fileSize];
@@ -68,7 +68,7 @@ public class TestDFSStripedOutputStream {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}
@@ -79,7 +79,7 @@ public class TestDFSStripedOutputStreamWithFailure {
cluster.waitActive();
dfs = cluster.getFileSystem();
dfs.mkdirs(dir);
-dfs.createErasureCodingZone(dir, null, 0);
+dfs.createErasureCodingZone(dir, null);
}

private void tearDown() {
@@ -110,7 +110,7 @@ public class TestDFSUtil {
l2.setCorrupt(true);

List<LocatedBlock> ls = Arrays.asList(l1, l2);
-LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null, 0);
+LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null);

BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
@@ -81,7 +81,7 @@ public class TestDatanodeConfig {
public void testDataDirectories() throws IOException {
File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
Configuration conf = cluster.getConfiguration(0);
-// 1. Test unsupported schema. Only "file:" is supported.
+// 1. Test unsupported ecPolicy. Only "file:" is supported.
String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
DataNode dn = null;

@@ -97,7 +97,7 @@ public class TestDatanodeConfig {
}
assertNull("Data-node startup should have failed.", dn);

-// 2. Test "file:" schema and no schema (path-only). Both should work.
+// 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
String dnDir1 = fileAsURI(dataDir).toString() + "1";
String dnDir2 = makeURI("file", "localhost",
fileAsURI(dataDir).getPath() + "2");
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestECSchemas {
-private MiniDFSCluster cluster;
-
-@Before
-public void before() throws IOException {
-cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
-.build();
-cluster.waitActive();
-}
-
-@After
-public void after() {
-if (cluster != null) {
-cluster.shutdown();
-}
-}
-
-@Test
-public void testGetECSchemas() throws Exception {
-ECSchema[] ecSchemas = cluster.getFileSystem().getClient().getECSchemas();
-assertNotNull(ecSchemas);
-assertTrue("Should have at least one schema", ecSchemas.length > 0);
-}
-}
@@ -737,7 +737,7 @@ public class TestEncryptionZones {
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
-(byte) 0, null, 0))
+(byte) 0, null))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
@@ -22,10 +22,10 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

@@ -65,7 +65,7 @@ public class TestErasureCodingZones {
fs.mkdir(testDir, FsPermission.getDirDefault());

/* Normal creation of an erasure coding zone */
-fs.getClient().createErasureCodingZone(testDir.toString(), null, 0);
+fs.getClient().createErasureCodingZone(testDir.toString(), null);

/* Verify files under the zone are striped */
final Path ECFilePath = new Path(testDir, "foo");

@@ -78,7 +78,7 @@ public class TestErasureCodingZones {
fs.mkdir(notEmpty, FsPermission.getDirDefault());
fs.create(new Path(notEmpty, "foo"));
try {
-fs.getClient().createErasureCodingZone(notEmpty.toString(), null, 0);
+fs.getClient().createErasureCodingZone(notEmpty.toString(), null);
fail("Erasure coding zone on non-empty dir");
} catch (IOException e) {
assertExceptionContains("erasure coding zone for a non-empty directory", e);

@@ -88,10 +88,10 @@ public class TestErasureCodingZones {
final Path zone1 = new Path("/zone1");
final Path zone2 = new Path(zone1, "zone2");
fs.mkdir(zone1, FsPermission.getDirDefault());
-fs.getClient().createErasureCodingZone(zone1.toString(), null, 0);
+fs.getClient().createErasureCodingZone(zone1.toString(), null);
fs.mkdir(zone2, FsPermission.getDirDefault());
try {
-fs.getClient().createErasureCodingZone(zone2.toString(), null, 0);
+fs.getClient().createErasureCodingZone(zone2.toString(), null);
fail("Nested erasure coding zones");
} catch (IOException e) {
assertExceptionContains("already in an erasure coding zone", e);

@@ -101,7 +101,7 @@ public class TestErasureCodingZones {
final Path fPath = new Path("/file");
fs.create(fPath);
try {
-fs.getClient().createErasureCodingZone(fPath.toString(), null, 0);
+fs.getClient().createErasureCodingZone(fPath.toString(), null);
fail("Erasure coding zone on file");
} catch (IOException e) {
assertExceptionContains("erasure coding zone for a file", e);

@@ -114,8 +114,8 @@ public class TestErasureCodingZones {
final Path dstECDir = new Path("/dstEC");
fs.mkdir(srcECDir, FsPermission.getDirDefault());
fs.mkdir(dstECDir, FsPermission.getDirDefault());
-fs.getClient().createErasureCodingZone(srcECDir.toString(), null, 0);
-fs.getClient().createErasureCodingZone(dstECDir.toString(), null, 0);
+fs.getClient().createErasureCodingZone(srcECDir.toString(), null);
+fs.getClient().createErasureCodingZone(dstECDir.toString(), null);
final Path srcFile = new Path(srcECDir, "foo");
fs.create(srcFile);

@@ -160,7 +160,7 @@ public class TestErasureCodingZones {
public void testReplication() throws IOException {
final Path testDir = new Path("/ec");
fs.mkdir(testDir, FsPermission.getDirDefault());
-fs.createErasureCodingZone(testDir, null, 0);
+fs.createErasureCodingZone(testDir, null);
final Path fooFile = new Path(testDir, "foo");
// create ec file with replication=0
fs.create(fooFile, FsPermission.getFileDefault(), true,

@@ -177,47 +177,47 @@ public class TestErasureCodingZones {
}

@Test
-public void testGetErasureCodingInfoWithSystemDefaultSchema() throws Exception {
+public void testGetErasureCodingInfoWithSystemDefaultECPolicy() throws Exception {
String src = "/ec";
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
-assertNull(fs.getClient().getFileInfo(src).getECSchema());
+assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir ECInfo after creating ec zone
-fs.getClient().createErasureCodingZone(src, null, 0); //Default one will be used.
-ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
-verifyErasureCodingInfo(src, sysDefaultSchema);
+fs.getClient().createErasureCodingZone(src, null); //Default one will be used.
+ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+verifyErasureCodingInfo(src, sysDefaultECPolicy);
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
-verifyErasureCodingInfo(src + "/child1", sysDefaultSchema);
+verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy);
}

@Test
public void testGetErasureCodingInfo() throws Exception {
-ECSchema[] sysSchemas = ErasureCodingSchemaManager.getSystemSchemas();
-assertTrue("System schemas should be of only 1 for now",
-sysSchemas.length == 1);
+ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices();
+assertTrue("System ecPolicies should be of only 1 for now",
+sysECPolicies.length == 1);

-ECSchema usingSchema = sysSchemas[0];
+ErasureCodingPolicy usingECPolicy = sysECPolicies[0];
String src = "/ec2";
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
-assertNull(fs.getClient().getFileInfo(src).getECSchema());
+assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir ECInfo after creating ec zone
-fs.getClient().createErasureCodingZone(src, usingSchema, 0);
-verifyErasureCodingInfo(src, usingSchema);
+fs.getClient().createErasureCodingZone(src, usingECPolicy);
+verifyErasureCodingInfo(src, usingECPolicy);
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
-verifyErasureCodingInfo(src + "/child1", usingSchema);
+verifyErasureCodingInfo(src + "/child1", usingECPolicy);
}

private void verifyErasureCodingInfo(
-String src, ECSchema usingSchema) throws IOException {
+String src, ErasureCodingPolicy usingECPolicy) throws IOException {
HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
-ECSchema schema = hdfsFileStatus.getECSchema();
-assertNotNull(schema);
-assertEquals("Actually used schema should be equal with target schema",
-usingSchema, schema);
+ErasureCodingPolicy ecPolicy = hdfsFileStatus.getErasureCodingPolicy();
+assertNotNull(ecPolicy);
+assertEquals("Actually used ecPolicy should be equal with target ecPolicy",
+usingECPolicy, ecPolicy);
}
}
@@ -9,13 +9,13 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

-public class TestFileStatusWithECschema {
+public class TestFileStatusWithECPolicy {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;

@@ -37,29 +37,29 @@ public class TestFileStatusWithECschema {
}

@Test
-public void testFileStatusWithECschema() throws Exception {
+public void testFileStatusWithECPolicy() throws Exception {
// test directory not in EC zone
final Path dir = new Path("/foo");
assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
-assertNull(client.getFileInfo(dir.toString()).getECSchema());
+assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
// test file not in EC zone
final Path file = new Path(dir, "foo");
fs.create(file).close();
-assertNull(client.getFileInfo(file.toString()).getECSchema());
+assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
fs.delete(file, true);

-final ECSchema schema1 = ErasureCodingSchemaManager.getSystemDefaultSchema();
+final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
// create EC zone on dir
-fs.createErasureCodingZone(dir, schema1, 0);
-final ECSchema schame2 = client.getFileInfo(dir.toUri().getPath()).getECSchema();
-assertNotNull(schame2);
-assertTrue(schema1.equals(schame2));
+fs.createErasureCodingZone(dir, ecPolicy1);
+final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
+assertNotNull(ecPolicy2);
+assertTrue(ecPolicy1.equals(ecPolicy2));

// test file in EC zone
fs.create(file).close();
-final ECSchema schame3 =
-fs.getClient().getFileInfo(file.toUri().getPath()).getECSchema();
-assertNotNull(schame3);
-assertTrue(schema1.equals(schame3));
+final ErasureCodingPolicy ecPolicy3 =
+fs.getClient().getFileInfo(file.toUri().getPath()).getErasureCodingPolicy();
+assertNotNull(ecPolicy3);
+assertTrue(ecPolicy1.equals(ecPolicy3));
}
}
@@ -354,12 +354,12 @@ public class TestLease {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
-1010, 0, null, (byte) 0, null, 0)).when(mcp).getFileInfo(anyString());
+1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
Mockito
.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
-1010, 0, null, (byte) 0, null, 0))
+1010, 0, null, (byte) 0, null))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
@@ -64,8 +64,7 @@ public class TestReadStripedFileWithDecoding {
public void setup() throws IOException {
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
.numDataNodes(numDNs).build();
-cluster.getFileSystem().getClient().createErasureCodingZone("/",
-null, cellSize);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}
@@ -52,8 +52,7 @@ public class TestReadStripedFileWithMissingBlocks {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-cluster.getFileSystem().getClient().createErasureCodingZone("/",
-null, cellSize);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}
@@ -78,7 +78,7 @@ public class TestRecoverStripedFile {
cluster.waitActive();

fs = cluster.getFileSystem();
-fs.getClient().createErasureCodingZone("/", null, 0);
+fs.getClient().createErasureCodingZone("/", null);

List<DataNode> datanodes = cluster.getDataNodes();
for (int i = 0; i < dnNum; i++) {
@@ -54,8 +54,7 @@ public class TestSafeModeWithStripedFile {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-cluster.getFileSystem().getClient().createErasureCodingZone("/",
-null, cellSize);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.waitActive();

}
@@ -57,8 +57,7 @@ public class TestWriteReadStripedFile {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-cluster.getFileSystem().getClient().createErasureCodingZone("/",
-null, cellSize);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}
@@ -48,8 +48,7 @@ public class TestWriteStripedFileWithFailure {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-cluster.getFileSystem().getClient().createErasureCodingZone("/",
-null, cellSize);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
}
@@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;

@@ -88,7 +88,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;

@@ -682,8 +682,7 @@ public class TestPBHelper {
short[] liveBlkIndices0 = new short[2];
BlockECRecoveryInfo blkECRecoveryInfo0 = new BlockECRecoveryInfo(
new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
-liveBlkIndices0, ErasureCodingSchemaManager.getSystemDefaultSchema(),
-64 * 1024);
+liveBlkIndices0, ErasureCodingPolicyManager.getSystemDefaultPolicy());
DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil

@@ -697,8 +696,7 @@ public class TestPBHelper {
short[] liveBlkIndices1 = new short[2];
BlockECRecoveryInfo blkECRecoveryInfo1 = new BlockECRecoveryInfo(
new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
-liveBlkIndices1, ErasureCodingSchemaManager.getSystemDefaultSchema(),
-64 * 1024);
+liveBlkIndices1, ErasureCodingPolicyManager.getSystemDefaultPolicy());
List<BlockECRecoveryInfo> blkRecoveryInfosList = new ArrayList<BlockECRecoveryInfo>();
blkRecoveryInfosList.add(blkECRecoveryInfo0);
blkRecoveryInfosList.add(blkECRecoveryInfo1);

@@ -740,18 +738,18 @@ public class TestPBHelper {
assertEquals(liveBlockIndices1[i], liveBlockIndices2[i]);
}

-ECSchema ecSchema1 = blkECRecoveryInfo1.getECSchema();
-ECSchema ecSchema2 = blkECRecoveryInfo2.getECSchema();
-// Compare ECSchemas same as default ECSchema as we used system default
-// ECSchema used in this test
-compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema1);
-compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema2);
+ErasureCodingPolicy ecPolicy1 = blkECRecoveryInfo1.getErasureCodingPolicy();
+ErasureCodingPolicy ecPolicy2 = blkECRecoveryInfo2.getErasureCodingPolicy();
+// Compare ECPolicies same as default ECPolicy as we used system default
+// ECPolicy used in this test
+compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy1);
+compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy2);
}

-private void compareECSchemas(ECSchema ecSchema1, ECSchema ecSchema2) {
-assertEquals(ecSchema1.getSchemaName(), ecSchema2.getSchemaName());
-assertEquals(ecSchema1.getNumDataUnits(), ecSchema2.getNumDataUnits());
-assertEquals(ecSchema1.getNumParityUnits(), ecSchema2.getNumParityUnits());
+private void compareECPolicies(ErasureCodingPolicy ecPolicy1, ErasureCodingPolicy ecPolicy2) {
+assertEquals(ecPolicy1.getName(), ecPolicy2.getName());
+assertEquals(ecPolicy1.getNumDataUnits(), ecPolicy2.getNumDataUnits());
+assertEquals(ecPolicy1.getNumParityUnits(), ecPolicy2.getNumParityUnits());
}

private void assertDnInfosEqual(DatanodeInfo[] dnInfos1,
@@ -1503,7 +1503,7 @@ public class TestBalancer {
cluster.waitActive();
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
-client.createErasureCodingZone("/", null, 0);
+client.createErasureCodingZone("/", null);

long totalCapacity = sum(capacities);
@@ -19,10 +19,9 @@ package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;

@@ -45,11 +44,10 @@ public class TestBlockInfoStriped {
private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
private static final long BASE_ID = -1600;
private static final Block baseBlock = new Block(BASE_ID);
-private static final ECSchema testSchema
-= ErasureCodingSchemaManager.getSystemDefaultSchema();
-private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+private static final ErasureCodingPolicy testECPolicy
+= ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
-testSchema, cellSize);
+testECPolicy);

private Block[] createReportedBlocks(int num) {
Block[] blocks = new Block[num];

@@ -237,7 +235,7 @@ public class TestBlockInfoStriped {
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
DataOutput out = new DataOutputStream(byteStream);
BlockInfoStriped blk = new BlockInfoStriped(new Block(blkID, numBytes,
-generationStamp), testSchema, cellSize);
+generationStamp), testECPolicy);

try {
blk.write(out);
@@ -56,7 +56,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
conf = getConf();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient()
-.createErasureCodingZone("/", null, cellSize);
+.createErasureCodingZone("/", null);
try {
cluster.waitActive();
doTestRead(conf, cluster, true);
@@ -86,7 +86,7 @@ public class TestSequentialBlockGroupId {
.getBlockGroupIdGenerator();
fs.mkdirs(eczone);
cluster.getFileSystem().getClient()
-.createErasureCodingZone("/eczone", null, cellSize);
+.createErasureCodingZone("/eczone", null);
}

@After
@@ -19,9 +19,8 @@
package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

@@ -31,17 +30,15 @@ import static org.junit.Assert.fail;

public class TestUnderReplicatedBlockQueues {

-private final ECSchema ecSchema =
-ErasureCodingSchemaManager.getSystemDefaultSchema();
-private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();

private BlockInfo genBlockInfo(long id) {
return new BlockInfoContiguous(new Block(id), (short) 3);
}

private BlockInfo genStripedBlockInfo(long id, long numBytes) {
-BlockInfoStriped sblk = new BlockInfoStriped(new Block(id), ecSchema,
-CELLSIZE);
+BlockInfoStriped sblk = new BlockInfoStriped(new Block(id), ecPolicy);
sblk.setNumBytes(numBytes);
return sblk;
}

@@ -101,8 +98,8 @@ public class TestUnderReplicatedBlockQueues {

@Test
public void testStripedBlockPriorities() throws Throwable {
-int dataBlkNum = ecSchema.getNumDataUnits();
-int parityBlkNUm = ecSchema.getNumParityUnits();
+int dataBlkNum = ecPolicy.getNumDataUnits();
+int parityBlkNUm = ecPolicy.getNumParityUnits();
doTestStripedBlockPriorities(1, parityBlkNUm);
doTestStripedBlockPriorities(dataBlkNum, parityBlkNUm);
}

@@ -110,7 +107,7 @@ public class TestUnderReplicatedBlockQueues {
private void doTestStripedBlockPriorities(int dataBlkNum, int parityBlkNum)
throws Throwable {
int groupSize = dataBlkNum + parityBlkNum;
-long numBytes = CELLSIZE * dataBlkNum;
+long numBytes = ecPolicy.getCellSize() * dataBlkNum;
UnderReplicatedBlocks queues = new UnderReplicatedBlocks();

// add a striped block which been left NUM_DATA_BLOCKS internal blocks
@@ -426,7 +426,7 @@ public class TestMover {
client.setStoragePolicy(barDir,
HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
// set "/bar" directory with EC zone.
-client.createErasureCodingZone(barDir, null, 0);
+client.createErasureCodingZone(barDir, null);

// write file to barDir
final String fooFile = "/bar/foo";
@@ -70,7 +70,7 @@ public class TestAddOverReplicatedStripedBlocks {
cluster.waitActive();
fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
-fs.getClient().createErasureCodingZone(dirPath.toString(), null, CELLSIZE);
+fs.getClient().createErasureCodingZone(dirPath.toString(), null);
}

@After

@@ -180,7 +180,7 @@ public class TestAddOverReplicatedStripedBlocks {
long groupId = bg.getBlock().getBlockId();
Block blk = new Block(groupId, BLOCK_SIZE, gs);
BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
-ErasureCodingSchemaManager.getSystemDefaultSchema(), CELLSIZE);
+ErasureCodingPolicyManager.getSystemDefaultPolicy());
for (int i = 0; i < GROUP_SIZE; i++) {
blk.setBlockId(groupId + i);
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
@@ -75,7 +75,7 @@ public class TestAddStripedBlocks {
.numDataNodes(GROUP_SIZE).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
-dfs.getClient().createErasureCodingZone("/", null, 0);
+dfs.getClient().createErasureCodingZone("/", null);
}

@After
@@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
- import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.DistributedFileSystem;

@@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.IOUtils;
- import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;

@@ -76,8 +75,8 @@ public class TestFSEditLogLoader {

private static final int NUM_DATA_NODES = 0;

- private static final ECSchema testSchema
- = ErasureCodingSchemaManager.getSystemDefaultSchema();
+ private static final ErasureCodingPolicy testECPolicy
+ = ErasureCodingPolicyManager.getSystemDefaultPolicy();

@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {

@@ -450,11 +449,10 @@ public class TestFSEditLogLoader {
long timestamp = 1426222918;
short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
- int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

//set the storage policy of the directory
fs.mkdir(new Path(testDir), new FsPermission("755"));
- fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
+ fs.getClient().getNamenode().createErasureCodingZone(testDir, null);

// Create a file with striped block
Path p = new Path(testFilePath);

@@ -466,7 +464,7 @@ public class TestFSEditLogLoader {

// Add a striped block to the file
BlockInfoStriped stripedBlk = new BlockInfoStriped(
- new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
+ new Block(blkId, blkNumBytes, timestamp), testECPolicy);
INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
file.toUnderConstruction(clientName, clientMachine);
file.addBlock(stripedBlk);

@@ -491,7 +489,6 @@ public class TestFSEditLogLoader {
assertEquals(timestamp, blks[0].getGenerationStamp());
assertEquals(blockNum, ((BlockInfoStriped)blks[0]).getDataBlockNum());
assertEquals(parityNum, ((BlockInfoStriped)blks[0]).getParityBlockNum());
- assertEquals(cellSize, ((BlockInfoStriped)blks[0]).getCellSize());

cluster.shutdown();
cluster = null;

@@ -524,17 +521,16 @@ public class TestFSEditLogLoader {
long timestamp = 1426222918;
short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
- int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

//set the storage policy of the directory
fs.mkdir(new Path(testDir), new FsPermission("755"));
- fs.getClient().getNamenode().createErasureCodingZone(testDir, null, 0);
+ fs.getClient().getNamenode().createErasureCodingZone(testDir, null);

//create a file with striped blocks
Path p = new Path(testFilePath);
DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
BlockInfoStriped stripedBlk = new BlockInfoStriped(
- new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
+ new Block(blkId, blkNumBytes, timestamp), testECPolicy);
INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
file.toUnderConstruction(clientName, clientMachine);
file.addBlock(stripedBlk);

@@ -573,7 +569,6 @@ public class TestFSEditLogLoader {
assertEquals(newTimestamp, blks[0].getGenerationStamp());
assertEquals(blockNum, ((BlockInfoStriped)blks[0]).getDataBlockNum());
assertEquals(parityNum, ((BlockInfoStriped)blks[0]).getParityBlockNum());
- assertEquals(cellSize, ((BlockInfoStriped)blks[0]).getCellSize());

cluster.shutdown();
cluster = null;
@@ -28,11 +28,11 @@ import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.hdfs.protocol.Block;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
- import org.apache.hadoop.io.erasurecode.ECSchema;
import org.junit.Assert;

import org.apache.hadoop.fs.permission.PermissionStatus;

@@ -68,9 +68,8 @@ public class TestFSImage {

private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
"image-with-zero-block-size.tar.gz";
- private static final ECSchema testSchema
- = ErasureCodingSchemaManager.getSystemDefaultSchema();
- private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+ private static final ErasureCodingPolicy testECPolicy
+ = ErasureCodingPolicyManager.getSystemDefaultPolicy();

@Test
public void testPersist() throws IOException {

@@ -141,7 +140,7 @@ public class TestFSImage {
private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
boolean isUC) throws IOException{
// contruct a INode with StripedBlock for saving and loading
- fsn.createErasureCodingZone("/", null, 0, false);
+ fsn.createErasureCodingZone("/", null, false);
long id = 123456789;
byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
PermissionStatus permissionStatus = new PermissionStatus("testuser_a",

@@ -162,7 +161,7 @@ public class TestFSImage {
for (int i = 0; i < stripedBlks.length; i++) {
stripedBlks[i] = new BlockInfoStriped(
new Block(stripedBlkId + i, preferredBlockSize, timestamp),
- testSchema, cellSize);
+ testECPolicy);
file.addBlock(stripedBlks[i]);
}

@@ -386,7 +385,7 @@ public class TestFSImage {
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
- fs.getClient().getNamenode().createErasureCodingZone("/", null, 0);
+ fs.getClient().getNamenode().createErasureCodingZone("/", null);
Path file = new Path("/striped");
FSDataOutputStream out = fs.create(file);
byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
@@ -1202,7 +1202,7 @@ public class TestFsck {

HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
blockSize, modTime, accessTime, perms, owner, group, symlink,
- path, fileId, numChildren, null, storagePolicy, null, 0);
+ path, fileId, numChildren, null, storagePolicy, null);
Result replRes = new ReplicationResult(conf);
Result ecRes = new ErasureCodingResult(conf);

@@ -1644,8 +1644,8 @@ public class TestFsck {
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
- int totalSize = ErasureCodingSchemaManager.getSystemDefaultSchema().getNumDataUnits()
- + ErasureCodingSchemaManager.getSystemDefaultSchema().getNumParityUnits();
+ int totalSize = ErasureCodingPolicyManager.getSystemDefaultPolicy().getNumDataUnits()
+ + ErasureCodingPolicyManager.getSystemDefaultPolicy().getNumParityUnits();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
fs = cluster.getFileSystem();
@@ -26,11 +26,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
- import org.apache.hadoop.io.erasurecode.ECSchema;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

@@ -44,10 +44,10 @@ import java.io.IOException;
public class TestQuotaWithStripedBlocks {
private static final int BLOCK_SIZE = 1024 * 1024;
private static final long DISK_QUOTA = BLOCK_SIZE * 10;
- private static final ECSchema ecSchema =
- ErasureCodingSchemaManager.getSystemDefaultSchema();
- private static final int NUM_DATA_BLOCKS = ecSchema.getNumDataUnits();
- private static final int NUM_PARITY_BLOCKS = ecSchema.getNumParityUnits();
+ private static final ErasureCodingPolicy ecPolicy =
+ ErasureCodingPolicyManager.getSystemDefaultPolicy();
+ private static final int NUM_DATA_BLOCKS = ecPolicy.getNumDataUnits();
+ private static final int NUM_PARITY_BLOCKS = ecPolicy.getNumParityUnits();
private static final int GROUP_SIZE = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
private static final Path ecDir = new Path("/ec");

@@ -66,7 +66,7 @@ public class TestQuotaWithStripedBlocks {
dfs = cluster.getFileSystem();

dfs.mkdirs(ecDir);
- dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
+ dfs.getClient().createErasureCodingZone(ecDir.toString(), ecPolicy);
dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
dfs.setStoragePolicy(ecDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
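The zone-creation call sites above all change the same way: the (schema, cellSize) pair collapses into a single policy argument, and passing null still selects the system default. A minimal before/after sketch, assuming a DistributedFileSystem from a running MiniDFSCluster as in these tests (the wrapper class is hypothetical; the calls are the ones shown in the hunks):

    // Sketch only: EC zone creation after this patch.
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    class CreateZoneSketch {
      static void createZone(DistributedFileSystem dfs, String dir) throws Exception {
        // Old signature (removed): createErasureCodingZone(dir, schema, cellSize)
        // New signature: the policy carries codec, data/parity counts and cell size together.
        ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
        dfs.getClient().createErasureCodingZone(dir, ecPolicy);
        // Passing null instead of ecPolicy keeps the system default, as most tests above do.
      }
    }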
@@ -35,13 +35,13 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
- import org.apache.hadoop.io.erasurecode.ECSchema;

import org.junit.Test;


@@ -59,9 +59,8 @@ public class TestStripedINodeFile {
private final BlockStoragePolicy defaultPolicy =
defaultSuite.getDefaultPolicy();

- private static final ECSchema testSchema
- = ErasureCodingSchemaManager.getSystemDefaultSchema();
- private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
+ private static final ErasureCodingPolicy testECPolicy
+ = ErasureCodingPolicyManager.getSystemDefaultPolicy();

private static INodeFile createStripedINodeFile() {
return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,

@@ -79,7 +78,7 @@ public class TestStripedINodeFile {
public void testBlockStripedTotalBlockCount() {
Block blk = new Block(1);
BlockInfoStriped blockInfoStriped
- = new BlockInfoStriped(blk, testSchema, cellSize);
+ = new BlockInfoStriped(blk, testECPolicy);
assertEquals(9, blockInfoStriped.getTotalBlockNum());
}

@@ -89,7 +88,7 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk = new Block(1);
BlockInfoStriped blockInfoStriped
- = new BlockInfoStriped(blk, testSchema, cellSize);
+ = new BlockInfoStriped(blk, testECPolicy);
inf.addBlock(blockInfoStriped);
assertEquals(1, inf.getBlocks().length);
}

@@ -100,7 +99,7 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk = new Block(1);
BlockInfoStriped blockInfoStriped
- = new BlockInfoStriped(blk, testSchema, cellSize);
+ = new BlockInfoStriped(blk, testECPolicy);
blockInfoStriped.setNumBytes(1);
inf.addBlock(blockInfoStriped);
// 0. Calculate the total bytes per stripes <Num Bytes per Stripes>

@@ -125,11 +124,11 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk1 = new Block(1);
BlockInfoStriped blockInfoStriped1
- = new BlockInfoStriped(blk1, testSchema, cellSize);
+ = new BlockInfoStriped(blk1, testECPolicy);
blockInfoStriped1.setNumBytes(1);
Block blk2 = new Block(2);
BlockInfoStriped blockInfoStriped2
- = new BlockInfoStriped(blk2, testSchema, cellSize);
+ = new BlockInfoStriped(blk2, testECPolicy);
blockInfoStriped2.setNumBytes(1);
inf.addBlock(blockInfoStriped1);
inf.addBlock(blockInfoStriped2);

@@ -144,7 +143,7 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk = new Block(1);
BlockInfoStriped blockInfoStriped
- = new BlockInfoStriped(blk, testSchema, cellSize);
+ = new BlockInfoStriped(blk, testECPolicy);
blockInfoStriped.setNumBytes(100);
inf.addBlock(blockInfoStriped);
// Compute file size should return actual data

@@ -159,7 +158,7 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk = new Block(1);
BlockInfoUnderConstructionStriped bInfoUCStriped
- = new BlockInfoUnderConstructionStriped(blk, testSchema, cellSize);
+ = new BlockInfoUnderConstructionStriped(blk, testECPolicy);
bInfoUCStriped.setNumBytes(100);
inf.addBlock(bInfoUCStriped);
assertEquals(100, inf.computeFileSize());

@@ -172,7 +171,7 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk = new Block(1);
BlockInfoStriped blockInfoStriped
- = new BlockInfoStriped(blk, testSchema, cellSize);
+ = new BlockInfoStriped(blk, testECPolicy);
blockInfoStriped.setNumBytes(100);
inf.addBlock(blockInfoStriped);

@@ -193,7 +192,7 @@ public class TestStripedINodeFile {
INodeFile inf = createStripedINodeFile();
Block blk = new Block(1);
BlockInfoUnderConstructionStriped bInfoUCStriped
- = new BlockInfoUnderConstructionStriped(blk, testSchema, cellSize);
+ = new BlockInfoUnderConstructionStriped(blk, testECPolicy);
bInfoUCStriped.setNumBytes(100);
inf.addBlock(bInfoUCStriped);

@@ -235,7 +234,7 @@ public class TestStripedINodeFile {
dfs.mkdirs(zone);

// create erasure zone
- dfs.createErasureCodingZone(zone, null, 0);
+ dfs.createErasureCodingZone(zone, null);
DFSTestUtil.createFile(dfs, zoneFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
final FSDirectory fsd = fsn.getFSDirectory();
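Every BlockInfoStriped and BlockInfoUnderConstructionStriped construction in the hunks above drops its trailing cellSize argument for the same reason: the striped block now reads its layout from the policy. A small sketch of the new constructor shape (the wrapper class and method are hypothetical; the constructors and the policy lookup are as shown in this diff):

    // Sketch only: building striped block metadata from a policy.
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    class StripedBlockSketch {
      static BlockInfoStriped newStripedBlock(long blockId) {
        ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
        // Was: new BlockInfoStriped(blk, schema, cellSize); the policy now supplies both.
        return new BlockInfoStriped(new Block(blockId), ecPolicy);
      }
    }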
@@ -60,7 +60,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
- cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
+ cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
fs = cluster.getFileSystem();
Path eczone = new Path("/eczone");
fs.mkdirs(eczone);
@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;

- import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
- import org.apache.hadoop.io.erasurecode.ECSchema;
+ import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Before;
import org.junit.Test;


@@ -84,8 +84,8 @@ public class TestStripedBlockUtil {
private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
/** number of full stripes in a full block group */
private final int BLK_GROUP_STRIPE_NUM = 16;
- private final ECSchema SCEHMA = ErasureCodingSchemaManager.
- getSystemDefaultSchema();
+ private final ErasureCodingPolicy ECPOLICY = ErasureCodingPolicyManager.
+ getSystemDefaultPolicy();
private final Random random = new Random();

private int[] blockGroupSizes;

@@ -152,7 +152,7 @@ public class TestStripedBlockUtil {
int done = 0;
while (done < bgSize) {
Preconditions.checkState(done % CELLSIZE == 0);
- StripingCell cell = new StripingCell(SCEHMA, CELLSIZE, done / CELLSIZE, 0);
+ StripingCell cell = new StripingCell(ECPOLICY, CELLSIZE, done / CELLSIZE, 0);
int idxInStripe = cell.idxInStripe;
int size = Math.min(CELLSIZE, bgSize - done);
for (int i = 0; i < size; i++) {

@@ -245,7 +245,7 @@ public class TestStripedBlockUtil {
if (brStart + brSize > bgSize) {
continue;
}
- AlignedStripe[] stripes = divideByteRangeIntoStripes(SCEHMA,
+ AlignedStripe[] stripes = divideByteRangeIntoStripes(ECPOLICY,
CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);

for (AlignedStripe stripe : stripes) {
@@ -65,7 +65,7 @@ public class TestJsonUtil {
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
now, now + 10, new FsPermission((short) 0644), "user", "group",
DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
- HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null, 0);
+ HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);
@@ -57,11 +57,11 @@
<comparators>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^[ \t]*Create a zone to encode files using a specified schema( )*</expected-output>
+ <expected-output>^[ \t]*Create a zone to encode files using a specified policy( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^-createZone \[-s <schemaName>\] \[-c <cellSize>\] <path>(.)*</expected-output>
+ <expected-output>^-createZone \[-s <policyName>\] <path>(.)*</expected-output>
</comparator>
</comparators>
</test>

@@ -86,20 +86,20 @@
</test>

<test>
- <description>help: listSchemas command</description>
+ <description>help: listPolicies command</description>
<test-commands>
- <ec-admin-command>-fs NAMENODE -help listSchemas</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -help listPolicies</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
- <expected-output>Get the list of ECSchemas supported</expected-output>
+ <expected-output>Get the list of erasure coding policies supported</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^-listSchemas (.)*</expected-output>
+ <expected-output>^-listPolicies (.)*</expected-output>
</comparator>
</comparators>
</test>

@@ -109,7 +109,7 @@
<description>createZone : create a zone to encode files</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
- <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>

@@ -141,7 +141,7 @@
</test>

<test>
- <description>createZone : default schema</description>
+ <description>createZone : default policy</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>

@@ -153,7 +153,7 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
- <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+ <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
</comparator>
</comparators>
</test>

@@ -179,7 +179,7 @@
<description>getZone : get information about the EC zone at specified path</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
- <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>

@@ -188,7 +188,7 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
- <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+ <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
</comparator>
</comparators>
</test>

@@ -197,7 +197,7 @@
<description>getZone : get EC zone at specified file path</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
- <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
<command>-fs NAMENODE -touchz /eczone/ecfile</command>
<ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
</test-commands>

@@ -208,15 +208,15 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
- <expected-output>Dir: /eczone, Schema: ECSchema=[Name=RS-6-3</expected-output>
+ <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
</comparator>
</comparators>
</test>

<test>
- <description>listSchemas : get the list of ECSchemas supported</description>
+ <description>listPolicies : get the list of ECPolicies supported</description>
<test-commands>
- <ec-admin-command>-fs NAMENODE -listSchemas</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -listPolicies</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>

@@ -247,7 +247,7 @@
</test>

<test>
- <description>createZone : illegal parameters - schema name is missing</description>
+ <description>createZone : illegal parameters - policy name is missing</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone -s</ec-admin-command>

@@ -281,10 +281,10 @@
</test>

<test>
- <description>createZone : illegal parameters - invalidschema</description>
+ <description>createZone : illegal parameters - invalidpolicy</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
- <ec-admin-command>-fs NAMENODE -createZone -s invalidschema /eczone</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -createZone -s invalidpolicy /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>

@@ -292,7 +292,7 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
- <expected-output>Schema 'invalidschema' does not match any of the supported schemas. Please select any one of [RS-6-3]</expected-output>
+ <expected-output>Policy 'invalidpolicy' does not match any of the supported policies. Please select any one of [RS-6-3-64k]</expected-output>
</comparator>
</comparators>
</test>

@@ -359,16 +359,16 @@
</test>

<test>
- <description>listSchemas : illegal parameters - too many parameters</description>
+ <description>listPolicies : illegal parameters - too many parameters</description>
<test-commands>
- <ec-admin-command>-fs NAMENODE -listSchemas /eczone</ec-admin-command>
+ <ec-admin-command>-fs NAMENODE -listPolicies /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
- <expected-output>-listSchemas: Too many parameters</expected-output>
+ <expected-output>-listPolicies: Too many parameters</expected-output>
</comparator>
</comparators>
</test>