diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 8d6ff858d4c..ffa0bcede9f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -38,4 +38,7 @@ private ErasureCodeConstants() {
 
   public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
       RS_LEGACY_CODEC_NAME, 6, 3);
+
+  public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
+      XOR_CODEC_NAME, 2, 1);
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index acbc8f674b9..b55b4dfdb51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -147,6 +147,7 @@ public enum DatanodeReportType {
   public static final byte RS_6_3_POLICY_ID = 0;
   public static final byte RS_3_2_POLICY_ID = 1;
   public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
+  public static final byte XOR_2_1_POLICY_ID = 3;
 
   /* Hidden constructor */
   protected HdfsConstants() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index c4bc8defde6..8a85d230478 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -36,7 +36,7 @@ public final class ErasureCodingPolicyManager {
 
   /**
-   * TODO: HDFS-8095
+   * TODO: HDFS-8095.
    */
   private static final int DEFAULT_CELLSIZE = 64 * 1024;
   private static final ErasureCodingPolicy SYS_POLICY1 =
@@ -48,10 +48,14 @@ public final class ErasureCodingPolicyManager {
   private static final ErasureCodingPolicy SYS_POLICY3 =
       new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
           DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
+  private static final ErasureCodingPolicy SYS_POLICY4 =
+      new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
+          DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID);
 
   //We may add more later.
   private static final ErasureCodingPolicy[] SYS_POLICIES =
-      new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3};
+      new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3,
+          SYS_POLICY4};
 
   // Supported storage policies for striped EC files
   private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE =
       new byte[] {
@@ -96,6 +100,19 @@ public static ErasureCodingPolicy getSystemDefaultPolicy() {
     return SYS_POLICY1;
   }
 
+  /**
+   * Get system-wide policy by policy ID.
+   * @return the policy with the given ID, or null if no such policy exists
+   */
+  public static ErasureCodingPolicy getPolicyByPolicyID(byte id) {
+    for (ErasureCodingPolicy policy : SYS_POLICIES) {
+      if (policy.getId() == id) {
+        return policy;
+      }
+    }
+    return null;
+  }
+
   /**
    * Get all policies that's available to use.
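One way the new lookup can be used, with the fallback mirroring the INodeFile change below. This is a minimal illustrative sketch, not part of the patch; the class name PolicyLookupSketch is invented, while all imported types and methods come from the code above.

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class PolicyLookupSketch {
  public static void main(String[] args) {
    // Resolve the new XOR policy by its ID; the lookup returns null for
    // IDs that are not in SYS_POLICIES.
    ErasureCodingPolicy policy = ErasureCodingPolicyManager
        .getPolicyByPolicyID(HdfsConstants.XOR_2_1_POLICY_ID);
    if (policy == null) {
      // Unknown ID: fall back to the system default, as INodeFile now does.
      policy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
    }
    System.out.println(policy.getName() + ": "
        + policy.getNumDataUnits() + " data + "
        + policy.getNumParityUnits() + " parity units");
  }
}

Returning null for unknown IDs leaves the fallback decision to callers, which is what allows INodeFile to default to the system policy.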
    * @return all policies
@@ -141,7 +158,7 @@ public static boolean checkStoragePolicySuitableForECStripedMode(
   }
 
   /**
-   * Clear and clean up
+   * Clear and clean up.
    */
   public void clear() {
     activePoliciesByName.clear();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 37f97dbd7ad..a5dcee9c6b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -455,9 +455,13 @@ public short getPreferredBlockReplication() {
     if(!isStriped()){
       return max;
     }
-    // TODO support more policies based on policyId
+
     ErasureCodingPolicy ecPolicy =
-        ErasureCodingPolicyManager.getSystemDefaultPolicy();
+        ErasureCodingPolicyManager.getPolicyByPolicyID(
+            getErasureCodingPolicyID());
+    if (ecPolicy == null) {
+      ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    }
     return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 13e2656be31..1fbc1d9207c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1888,21 +1888,41 @@ public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
    * Creates the metadata of a file in striped layout. This method only
    * manipulates the NameNode state without injecting data to DataNode.
    * You should disable periodical heartbeat before use this.
-   * @param file Path of the file to create
+   * @param file Path of the file to create
    * @param dir Parent path of the file
    * @param numBlocks Number of striped block groups to add to the file
    * @param numStripesPerBlk Number of striped cells in each block
    * @param toMkdir
    */
-  public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir,
-      int numBlocks, int numStripesPerBlk, boolean toMkdir) throws Exception {
+  public static void createStripedFile(MiniDFSCluster cluster, Path file,
+      Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
+      throws Exception {
+    createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
+        toMkdir, null);
+  }
+
+  /**
+   * Creates the metadata of a file in striped layout. This method only
+   * manipulates the NameNode state without injecting data to DataNode.
+   * You should disable periodical heartbeat before use this.
+   * @param file Path of the file to create
+   * @param dir Parent path of the file
+   * @param numBlocks Number of striped block groups to add to the file
+   * @param numStripesPerBlk Number of striped cells in each block
+   * @param toMkdir
+   * @param ecPolicy erasure coding policy to apply to the created file. A
+   *                 null value means using the default erasure coding policy.
+   */
+  public static void createStripedFile(MiniDFSCluster cluster, Path file,
+      Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir,
+      ErasureCodingPolicy ecPolicy) throws Exception {
     DistributedFileSystem dfs = cluster.getFileSystem();
     // If outer test already set EC policy, dir should be left as null
     if (toMkdir) {
       assert dir != null;
       dfs.mkdirs(dir);
       try {
-        dfs.getClient().setErasureCodingPolicy(dir.toString(), null);
+        dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy);
       } catch (IOException e) {
         if (!e.getMessage().contains("non-empty directory")) {
           throw e;
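For illustration, a hypothetical test helper using the new overload. The helper name and the /xor paths are invented; it assumes a running MiniDFSCluster with periodic heartbeats disabled, as the javadoc above requires.

// Hypothetical test helper: create the metadata of an XOR-2-1 striped file
// via the new seven-argument overload.
static void createXorFile(MiniDFSCluster cluster) throws Exception {
  ErasureCodingPolicy xorPolicy = ErasureCodingPolicyManager
      .getPolicyByPolicyID(HdfsConstants.XOR_2_1_POLICY_ID);
  DFSTestUtil.createStripedFile(cluster, new Path("/xor/file"),
      new Path("/xor"), 2 /* numBlocks */, 4 /* numStripesPerBlk */,
      true /* toMkdir */, xorPolicy);
  // Passing null instead of xorPolicy keeps the old behavior: the
  // directory falls back to the system default erasure coding policy.
}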
+   */
+  public static void createStripedFile(MiniDFSCluster cluster, Path file,
+      Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir,
+      ErasureCodingPolicy ecPolicy) throws Exception {
     DistributedFileSystem dfs = cluster.getFileSystem();
     // If outer test already set EC policy, dir should be left as null
     if (toMkdir) {
       assert dir != null;
       dfs.mkdirs(dir);
       try {
-        dfs.getClient().setErasureCodingPolicy(dir.toString(), null);
+        dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy);
       } catch (IOException e) {
         if (!e.getMessage().contains("non-empty directory")) {
           throw e;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 3b46c665876..121b9a41383 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -64,20 +64,34 @@ public class TestDFSStripedInputStream {
   private DistributedFileSystem fs;
   private final Path dirPath = new Path("/striped");
   private Path filePath = new Path(dirPath, "file");
-  private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
-  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
-  private final int cellSize = ecPolicy.getCellSize();
+  private ErasureCodingPolicy ecPolicy;
+  private short dataBlocks;
+  private short parityBlocks;
+  private int cellSize;
   private final int stripesPerBlock = 2;
-  private final int blockSize = stripesPerBlock * cellSize;
-  private final int blockGroupSize = dataBlocks * blockSize;
+  private int blockSize;
+  private int blockGroupSize;
 
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
+  public ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+    /*
+     * Initialize erasure coding policy.
+     */
+    ecPolicy = getEcPolicy();
+    dataBlocks = (short) ecPolicy.getNumDataUnits();
+    parityBlocks = (short) ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    blockSize = stripesPerBlock * cellSize;
+    blockGroupSize = dataBlocks * blockSize;
+    System.out.println("EC policy = " + ecPolicy);
+
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
@@ -94,7 +108,7 @@ public void setup() throws IOException {
     }
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
-    fs.getClient().setErasureCodingPolicy(dirPath.toString(), null);
+    fs.getClient().setErasureCodingPolicy(dirPath.toString(), ecPolicy);
   }
 
   @After
@@ -106,13 +120,13 @@ public void tearDown() {
   }
 
   /**
-   * Test {@link DFSStripedInputStream#getBlockAt(long)}
+   * Test {@link DFSStripedInputStream#getBlockAt(long)}.
    */
   @Test
   public void testRefreshBlock() throws Exception {
     final int numBlocks = 4;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        stripesPerBlock, false);
+        stripesPerBlock, false, ecPolicy);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, blockGroupSize * numBlocks);
     final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
@@ -136,7 +150,7 @@ public void testRefreshBlock() throws Exception {
   public void testPread() throws Exception {
     final int numBlocks = 2;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        stripesPerBlock, false);
+        stripesPerBlock, false, ecPolicy);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, blockGroupSize * numBlocks);
     int fileLen = blockGroupSize * numBlocks;
@@ -154,7 +168,9 @@ public void testPread() throws Exception {
           bg.getBlock().getBlockPoolId());
     }
 
-    /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
+    /**
+     * A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks
+     */
     for (int i = 0; i < stripesPerBlock; i++) {
       for (int j = 0; j < dataBlocks; j++) {
         for (int k = 0; k < cellSize; k++) {
@@ -194,7 +210,7 @@ public void testPreadWithDNFailure() throws Exception {
     final int numBlocks = 4;
     final int failedDNIdx = dataBlocks - 1;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        stripesPerBlock, false);
+        stripesPerBlock, false, ecPolicy);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, blockGroupSize);
 
@@ -305,7 +321,7 @@ private void testStatefulRead(boolean useByteBuffer,
       setup();
     }
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        stripesPerBlock, false);
+        stripesPerBlock, false, ecPolicy);
     LocatedBlocks lbs = fs.getClient().namenode.
         getBlockLocations(filePath.toString(), 0, fileSize);
 
@@ -330,7 +346,9 @@ private void testStatefulRead(boolean useByteBuffer,
 
     byte[] expected = new byte[fileSize];
     for (LocatedBlock bg : lbs.getLocatedBlocks()) {
-      /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
+      /**
+       * A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks
+       */
       for (int i = 0; i < stripesPerBlock; i++) {
         for (int j = 0; j < dataBlocks; j++) {
           for (int k = 0; k < cellSize; k++) {
@@ -371,7 +389,7 @@ public void testStatefulReadWithDNFailure() throws Exception {
     final int numBlocks = 4;
     final int failedDNIdx = dataBlocks - 1;
     DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
-        stripesPerBlock, false);
+        stripesPerBlock, false, ecPolicy);
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, blockGroupSize);
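The triple loops in the tests above fill the expected buffer cell by cell. A hypothetical helper (not part of the patch; the class and method names are invented) spelling out the index math those loops rely on:

final class StripedOffsetSketch {
  // Maps byte k of stripe i in internal data block j of a block group back
  // to its offset in the logical file, under the round-robin striped layout
  // where consecutive cells of file data go to consecutive data blocks.
  static long stripedFileOffset(long blockGroupStart, int i, int j, int k,
      int cellSize, int dataBlocks) {
    return blockGroupStart
        + (long) i * cellSize * dataBlocks // full stripes before stripe i
        + (long) j * cellSize              // earlier cells within stripe i
        + k;                               // position inside the cell
  }
}

With XOR-2-1-64k (dataBlocks = 2, cellSize = 64 KiB) each stripe covers 128 KiB of file data, versus 384 KiB under the default RS-6-3-64k policy.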
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index b686f28b4b8..5bde16efaf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -47,23 +47,36 @@ public class TestDFSStripedOutputStream {
     GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
   }
 
-  private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  private final int dataBlocks = ecPolicy.getNumDataUnits();
-  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private Configuration conf;
-  private final int cellSize = ecPolicy.getCellSize();
+  private int cellSize;
   private final int stripesPerBlock = 4;
-  private final int blockSize = cellSize * stripesPerBlock;
+  private int blockSize;
 
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
+  public ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+    /*
+     * Initialize erasure coding policy.
+     */
+    ecPolicy = getEcPolicy();
+    dataBlocks = ecPolicy.getNumDataUnits();
+    parityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    blockSize = stripesPerBlock * cellSize;
+    System.out.println("EC policy = " + ecPolicy);
+
     int numDNs = dataBlocks + parityBlocks + 2;
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -76,7 +89,7 @@ public void setup() throws IOException {
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", ecPolicy);
     fs = cluster.getFileSystem();
   }
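Because setup() now derives every size from getEcPolicy(), the cluster is sized per policy, which is why the XOR subclass below needs no setup of its own. The arithmetic, worked out for the two policies this patch exercises:

int numDNs = dataBlocks + parityBlocks + 2;
// RS-6-3-64k (default): 6 + 3 + 2 = 11 datanodes
// XOR-2-1-64k:          2 + 1 + 2 =  5 datanodes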
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index cde07a433d0..0baf9cc3270 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -47,6 +47,7 @@
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
+import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -76,18 +77,36 @@ public class TestDFSStripedOutputStreamWithFailure {
         .getLogger().setLevel(Level.ALL);
   }
 
-  private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  private final int dataBlocks = ecPolicy.getNumDataUnits();
-  private final int parityBlocks = ecPolicy.getNumParityUnits();
-  private final int cellSize = ecPolicy.getCellSize();
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
   private final int stripesPerBlock = 4;
-  private final int blockSize = cellSize * stripesPerBlock;
-  private final int blockGroupSize = blockSize * dataBlocks;
+  private int blockSize;
+  private int blockGroupSize;
 
   private static final int FLUSH_POS =
       9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
 
+  public ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  }
+
+  /*
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init() {
+    ecPolicy = getEcPolicy();
+    dataBlocks = ecPolicy.getNumDataUnits();
+    parityBlocks = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    blockSize = cellSize * stripesPerBlock;
+    blockGroupSize = blockSize * dataBlocks;
+    dnIndexSuite = getDnIndexSuite();
+    lengths = newLengths();
+  }
+
   List<Integer> newLengths() {
     final List<Integer> lens = new ArrayList<>();
     lens.add(FLUSH_POS + 2);
@@ -104,7 +123,7 @@ List<Integer> newLengths() {
     return lens;
   }
 
-  private final int[][] dnIndexSuite = getDnIndexSuite();
+  private int[][] dnIndexSuite;
 
   private int[][] getDnIndexSuite() {
     final int maxNumLevel = 2;
@@ -167,7 +186,7 @@ private int[] getKillPositions(int fileLen, int num) {
     return positions;
   }
 
-  private final List<Integer> lengths = newLengths();
+  private List<Integer> lengths;
 
   Integer getLength(int i) {
     return i >= 0 && i < lengths.size()? lengths.get(i): null;
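Moving dnIndexSuite and lengths from field initializers into the @Before hook is what makes subclassing safe: both derive from the policy, and a field initializer runs during construction, before JUnit invokes any @Before method. A hypothetical reduction of the pitfall this avoids (class name invented for illustration):

class InitOrderSketch {
  int dataBlocks;                               // assigned later, in @Before
  int[][] dnIndexSuite = new int[dataBlocks][]; // runs at construction time,
                                                // so this is new int[0][]
}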
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java
new file mode 100644
index 00000000000..75062e000c0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+
+/**
+ * This tests the read operation of DFS striped files with the XOR-2-1-64k
+ * erasure coding policy.
+ */
+public class TestDFSXORStripedInputStream extends TestDFSStripedInputStream {
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getPolicyByPolicyID(
+        HdfsConstants.XOR_2_1_POLICY_ID);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java
new file mode 100644
index 00000000000..64bddb85607
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+
+/**
+ * This tests the write operation of DFS striped files with the XOR-2-1-64k
+ * erasure coding policy.
+ */
+public class TestDFSXORStripedOutputStream extends TestDFSStripedOutputStream {
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getPolicyByPolicyID(
+        HdfsConstants.XOR_2_1_POLICY_ID);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java
new file mode 100644
index 00000000000..ed361a8594a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+
+/**
+ * This tests the write operation of DFS striped files with the XOR-2-1-64k
+ * erasure coding policy when there is a datanode failure.
+ */
+public class TestDFSXORStripedOutputStreamWithFailure
+    extends TestDFSStripedOutputStreamWithFailure {
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ErasureCodingPolicyManager.getPolicyByPolicyID(
+        HdfsConstants.XOR_2_1_POLICY_ID);
+  }
+}
\ No newline at end of file
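End to end, the patch lets a client pin a directory to the new policy. A hedged usage sketch: the class name and /xor path are invented, the bare Configuration assumes HDFS defaults are on the classpath, and getClient() is the test-visible DistributedFileSystem accessor used throughout the tests above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class XorPolicyUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    dfs.mkdirs(new Path("/xor"));
    // Files created under /xor are then striped as XOR-2-1-64k:
    // two data blocks plus one XOR parity block per block group.
    dfs.getClient().setErasureCodingPolicy("/xor",
        ErasureCodingPolicyManager.getPolicyByPolicyID(
            HdfsConstants.XOR_2_1_POLICY_ID));
  }
}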