HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

Andrew Wang 2017-03-09 17:29:11 -08:00
parent e96a0b8c92
commit 819808a016
42 changed files with 121 additions and 141 deletions
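The change itself is mechanical across all 42 files: every test-code call to the NameNode-side ErasureCodingPolicyManager#getSystemDefaultPolicy() is replaced with a new test-only helper on StripedFileTestUtil. A minimal sketch of the call-site migration, assuming the hadoop-hdfs test classes are on the classpath (both calls appear verbatim in the hunks below):

import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

class CallSiteMigrationSketch {
  // Before this commit (accessor removed from the production class):
  //   ErasureCodingPolicy ecPolicy =
  //       ErasureCodingPolicyManager.getSystemDefaultPolicy();
  // After this commit, test code calls the test-side helper instead:
  ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
}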

@@ -129,16 +129,6 @@ public final class ErasureCodingPolicyManager {
return SYS_POLICIES;
}
/**
* Get system-wide default policy, which can be used by default
* when no policy is specified for a path.
* @return ecPolicy
*/
public static ErasureCodingPolicy getSystemDefaultPolicy() {
// make this configurable?
return SYS_POLICY1;
}
/**
* Get a policy by policy ID.
* @return ecPolicy, or null if not found

@@ -1910,7 +1910,7 @@ public class DFSTestUtil {
Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
throws Exception {
createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
toMkdir, ErasureCodingPolicyManager.getSystemDefaultPolicy());
toMkdir, StripedFileTestUtil.getDefaultECPolicy());
}
/**

@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -42,9 +41,7 @@ import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
/**
@@ -81,7 +78,7 @@ public class ErasureCodeBenchmarkThroughput
private static final String EC_FILE_BASE = "ec-file-";
private static final String TMP_FILE_SUFFIX = ".tmp";
private static final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private static final byte[] data = new byte[BUFFER_SIZE_MB * 1024 * 1024];
static {

@@ -29,9 +29,11 @@ import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
import org.apache.hadoop.io.IOUtils;
@@ -558,4 +560,14 @@ public class StripedFileTestUtil {
throws IOException {
return fs.getClient().getLocatedBlocks(file.toString(), 0, Long.MAX_VALUE);
}
/**
* Get system-wide default Erasure Coding Policy, which can be
* used by default when no policy is specified for a path.
* @return ErasureCodingPolicy
*/
public static ErasureCodingPolicy getDefaultECPolicy() {
return ErasureCodingPolicyManager.getPolicyByID(
HdfsConstants.RS_6_3_POLICY_ID);
}
}
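A hedged usage sketch for the new helper: the tests changed below size their MiniDFSCluster from the returned policy, and this mirrors that pattern. It assumes the RS(6,3) system policy is registered, which is exactly what the implementation above relies on:

import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class DefaultPolicySizingSketch {
  public static void main(String[] args) {
    ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
    // The derived values the updated tests compute from the policy:
    short dataBlocks = (short) ecPolicy.getNumDataUnits();
    short parityBlocks = (short) ecPolicy.getNumParityUnits();
    int cellSize = ecPolicy.getCellSize();
    int numDNs = dataBlocks + parityBlocks; // typical cluster size in these tests
    System.out.println(ecPolicy.getName() + ": DNs=" + numDNs
        + ", cellSize=" + cellSize);
  }
}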

@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
@@ -76,7 +75,7 @@ public class TestDFSStripedInputStream {
public Timeout globalTimeout = new Timeout(300000);
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
return StripedFileTestUtil.getDefaultECPolicy();
}
@Before

@@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
@@ -62,7 +61,7 @@ public class TestDFSStripedOutputStream {
public Timeout globalTimeout = new Timeout(300000);
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
return StripedFileTestUtil.getDefaultECPolicy();
}
@Before

@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
@@ -89,7 +88,7 @@ public class TestDFSStripedOutputStreamWithFailure {
9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
return StripedFileTestUtil.getDefaultECPolicy();
}
/*

@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -83,7 +82,7 @@ public class TestDecommissionWithStriped {
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private int numDNs;
private final int cellSize = ecPolicy.getCellSize();
private final int dataBlocks = ecPolicy.getNumDataUnits();
@@ -143,7 +142,7 @@ public class TestDecommissionWithStriped {
dfs.mkdirs(ecDir);
dfs.setErasureCodingPolicy(ecDir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After

@@ -56,7 +56,7 @@ public class TestErasureCodingPolicies {
private DistributedFileSystem fs;
private static final int BLOCK_SIZE = 1024;
private static final ErasureCodingPolicy EC_POLICY =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private FSNamesystem namesystem;
@Rule
@@ -95,7 +95,7 @@ public class TestErasureCodingPolicies {
// set ec policy on dir
fs.setErasureCodingPolicy(dir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
// create a file which should be using ec
final Path ecSubDir = new Path(dir, "ecSubDir");
final Path ecFile = new Path(ecSubDir, "ecFile");
@@ -270,7 +270,7 @@ public class TestErasureCodingPolicies {
final Path testDir = new Path("/ec");
fs.mkdir(testDir, FsPermission.getDirDefault());
fs.setErasureCodingPolicy(testDir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
final Path fooFile = new Path(testDir, "foo");
// create ec file with replication=0
fs.create(fooFile, FsPermission.getFileDefault(), true,
@@ -292,7 +292,7 @@ public class TestErasureCodingPolicies {
assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir EC policy after setting
ErasureCodingPolicy sysDefaultECPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
fs.getClient().setErasureCodingPolicy(src, sysDefaultECPolicy.getName());
verifyErasureCodingInfo(src, sysDefaultECPolicy);
fs.create(new Path(ecDir, "child1")).close();

@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
@@ -41,7 +40,7 @@ public class TestErasureCodingPolicyWithSnapshot {
private final static int SUCCESS = 0;
private final ErasureCodingPolicy sysDefaultPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short groupSize = (short) (
sysDefaultPolicy.getNumDataUnits() +
sysDefaultPolicy.getNumParityUnits());

@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -47,7 +46,7 @@ public class TestFileChecksum {
private static final Logger LOG = LoggerFactory
.getLogger(TestFileChecksum.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private int dataBlocks = ecPolicy.getNumDataUnits();
private int parityBlocks = ecPolicy.getNumParityUnits();
@@ -82,7 +81,7 @@ public class TestFileChecksum {
Path ecPath = new Path(ecDir);
cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
client = fs.getClient();

@@ -26,7 +26,6 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.After;
import org.junit.Before;
@@ -71,7 +70,7 @@ public class TestFileStatusWithECPolicy {
assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
fs.delete(file, true);
final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
final ErasureCodingPolicy ecPolicy1 = StripedFileTestUtil.getDefaultECPolicy();
// set EC policy on dir
fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();

@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -57,7 +56,7 @@ public class TestLeaseRecoveryStriped {
.getLog(TestLeaseRecoveryStriped.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -68,7 +67,7 @@ public class TestReadStripedFileWithDecoding {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks =
(short) ecPolicy.getNumParityUnits();
@@ -103,7 +102,7 @@ public class TestReadStripedFileWithDecoding {
false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
}

@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.Assert;
import org.junit.Test;
import org.junit.Rule;
@@ -43,7 +42,7 @@ public class TestReadStripedFileWithMissingBlocks {
private DistributedFileSystem fs;
private Configuration conf = new HdfsConfiguration();
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();

@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
@@ -62,7 +61,7 @@ public class TestReconstructStripedFile {
public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlkNum = ecPolicy.getNumDataUnits();
private final int parityBlkNum = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
@@ -108,7 +107,7 @@ public class TestReconstructStripedFile {
fs = cluster.getFileSystem();
fs.getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
List<DataNode> datanodes = cluster.getDataNodes();
for (int i = 0; i < dnNum; i++) {
@@ -418,7 +417,7 @@ public class TestReconstructStripedFile {
BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(
new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices,
ErasureCodingPolicyManager.getSystemDefaultPolicy());
StripedFileTestUtil.getDefaultECPolicy());
List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
ecTasks.add(invalidECInfo);
dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);

@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.After;
@@ -44,7 +43,7 @@ import static org.junit.Assert.assertTrue;
public class TestSafeModeWithStripedFile {
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int numDNs = dataBlocks + parityBlocks;
@@ -64,7 +63,7 @@ public class TestSafeModeWithStripedFile {
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
cluster.waitActive();
}

@@ -51,8 +51,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
private MiniDFSCluster cluster;
private Configuration conf = new Configuration();
private DistributedFileSystem fs;
private ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
.getSystemDefaultPolicy();
private ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();

@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.ipc.RemoteException;
@@ -48,7 +47,7 @@ import java.util.Random;
public class TestWriteReadStripedFile {
public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int cellSize = ecPolicy.getCellSize();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
@@ -81,7 +80,7 @@ public class TestWriteReadStripedFile {
fs = cluster.getFileSystem();
fs.mkdirs(new Path("/ec"));
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After

@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
@@ -47,7 +46,7 @@ public class TestWriteStripedFileWithFailure {
}
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int numDNs = dataBlocks + parityBlocks;
@@ -60,7 +59,7 @@ public class TestWriteStripedFileWithFailure {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
}

@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -77,7 +78,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@@ -228,7 +228,7 @@ public class TestPBHelper {
datanodeUuids, storageIDs, storageTypes);
if (isStriped) {
blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize());
StripedFileTestUtil.getDefaultECPolicy().getCellSize());
}
return blkLocs;
}
@@ -720,7 +720,7 @@ public class TestPBHelper {
byte[] liveBlkIndices0 = new byte[2];
BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(
new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
liveBlkIndices0, ErasureCodingPolicyManager.getSystemDefaultPolicy());
liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy());
DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
@@ -734,7 +734,7 @@ public class TestPBHelper {
byte[] liveBlkIndices1 = new byte[2];
BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(
new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
liveBlkIndices1, ErasureCodingPolicyManager.getSystemDefaultPolicy());
liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy());
List<BlockECReconstructionInfo> blkRecoveryInfosList = new ArrayList<BlockECReconstructionInfo>();
blkRecoveryInfosList.add(blkECRecoveryInfo0);
blkRecoveryInfosList.add(blkECRecoveryInfo1);
@@ -823,8 +823,8 @@ public class TestPBHelper {
ErasureCodingPolicy ecPolicy2 = blkECRecoveryInfo2.getErasureCodingPolicy();
// Compare ECPolicies same as default ECPolicy as we used system default
// ECPolicy used in this test
compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy1);
compareECPolicies(ErasureCodingPolicyManager.getSystemDefaultPolicy(), ecPolicy2);
compareECPolicies(StripedFileTestUtil.getDefaultECPolicy(), ecPolicy1);
compareECPolicies(StripedFileTestUtil.getDefaultECPolicy(), ecPolicy2);
}
private void compareECPolicies(ErasureCodingPolicy ecPolicy1, ErasureCodingPolicy ecPolicy2) {

@@ -46,7 +46,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBER
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.AfterClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -203,7 +202,7 @@ public class TestBalancer {
}
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int groupSize = dataBlocks + parityBlocks;
@@ -1941,7 +1940,7 @@ public class TestBalancer {
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
client.setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
long totalCapacity = sum(capacities);

@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Assert;
import org.junit.Rule;
@@ -43,7 +43,7 @@ public class TestBlockInfoStriped {
private static final long BASE_ID = -1600;
private final Block baseBlock = new Block(BASE_ID);
private final ErasureCodingPolicy testECPolicy
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
= StripedFileTestUtil.getDefaultECPolicy();
private final int totalBlocks = testECPolicy.getNumDataUnits() +
testECPolicy.getNumParityUnits();
private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,

@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.net.ServerSocketUtil;
import org.junit.Rule;
@@ -35,7 +35,7 @@ import java.io.IOException;
public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
@@ -84,7 +84,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
.numDataNodes(numDNs)
.build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
try {
cluster.waitActive();
doTestRead(conf, cluster, true);

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.Iterator;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.Test;
@@ -33,7 +33,7 @@ import static org.junit.Assert.fail;
public class TestLowRedundancyBlockQueues {
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private BlockInfo genBlockInfo(long id) {
return new BlockInfoContiguous(new Block(id), (short) 3);

@@ -23,13 +23,13 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.net.NetworkTopology;
@@ -59,7 +59,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
}
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int cellSize = ecPolicy.getCellSize();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
@@ -151,7 +151,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
cluster.waitActive();
fs = cluster.getFileSystem();
fs.setErasureCodingPolicy(new Path("/"),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
FSNamesystem fsn = cluster.getNamesystem();
BlockManager bm = fsn.getBlockManager();
@@ -222,7 +222,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
cluster.waitActive();
fs = cluster.getFileSystem();
fs.setErasureCodingPolicy(new Path("/"),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
hosts[hosts.length - 1]);
@@ -276,7 +276,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
cluster.waitActive();
fs = cluster.getFileSystem();
fs.setErasureCodingPolicy(new Path("/"),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
final BlockManager bm = cluster.getNamesystem().getBlockManager();
final DatanodeManager dm = bm.getDatanodeManager();

@@ -37,9 +37,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
@@ -58,7 +58,7 @@ public class TestSequentialBlockGroupId {
.getLog("TestSequentialBlockGroupId");
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short REPLICATION = 1;
private final long SEED = 0;
private final int dataBlocks = ecPolicy.getNumDataUnits();
@@ -89,7 +89,7 @@ public class TestSequentialBlockGroupId {
.getBlockIdManager().getBlockGroupIdGenerator();
fs.mkdirs(ecDir);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ecDir",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After

@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -34,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Time;
@@ -54,7 +54,7 @@ public class TestSortLocatedStripedBlock {
.getLogger(TestSortLocatedStripedBlock.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int cellSize = ecPolicy.getCellSize();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();

@@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -137,7 +138,7 @@ public class TestBlockRecovery {
public TestName currentTestName = new TestName();
private final int cellSize =
ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize();
StripedFileTestUtil.getDefaultECPolicy().getCellSize();
private final int bytesPerChecksum = 512;
private final int[][][] blockLengthsSuite = {
{{11 * cellSize, 10 * cellSize, 9 * cellSize, 8 * cellSize,

@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
@@ -55,7 +54,7 @@ public class TestDataNodeErasureCodingMetrics {
public static final Log LOG = LogFactory.
getLog(TestDataNodeErasureCodingMetrics.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
@@ -76,7 +75,7 @@ public class TestDataNodeErasureCodingMetrics {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
}

@@ -78,7 +78,6 @@ import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.minikdc.MiniKdc;
@@ -478,7 +477,7 @@ public class TestMover {
}
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
@@ -538,7 +537,7 @@ public class TestMover {
HdfsConstants.HOT_STORAGE_POLICY_NAME);
// set an EC policy on "/bar" directory
client.setErasureCodingPolicy(barDir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
// write file to barDir
final String fooFile = "/bar/foo";

@@ -56,7 +56,7 @@ public class TestAddOverReplicatedStripedBlocks {
private final Path dirPath = new Path("/striped");
private Path filePath = new Path(dirPath, "file");
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final short groupSize = (short) (dataBlocks + parityBlocks);
@@ -82,7 +82,7 @@ public class TestAddOverReplicatedStripedBlocks {
fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
fs.getClient().setErasureCodingPolicy(dirPath.toString(),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
@@ -192,7 +192,7 @@ public class TestAddOverReplicatedStripedBlocks {
long groupId = bg.getBlock().getBlockId();
Block blk = new Block(groupId, blockSize, gs);
BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
ErasureCodingPolicyManager.getSystemDefaultPolicy());
StripedFileTestUtil.getDefaultECPolicy());
for (int i = 0; i < groupSize; i++) {
blk.setBlockId(groupId + i);
cluster.injectBlocks(i, Arrays.asList(blk), bpid);

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -45,7 +46,7 @@ import java.io.IOException;
public class TestAddStripedBlockInFBR {
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int cellSize = ecPolicy.getCellSize();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
@@ -88,7 +89,7 @@ public class TestAddStripedBlockInFBR {
dfs.mkdirs(ecDir);
dfs.mkdirs(repDir);
dfs.getClient().setErasureCodingPolicy(ecDir.toString(),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
// create several non-EC files and one EC file
final Path[] repFiles = new Path[groupSize];

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -67,7 +68,7 @@ import static org.junit.Assert.assertEquals;
public class TestAddStripedBlocks {
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
@@ -86,8 +87,8 @@ public class TestAddStripedBlocks {
.numDataNodes(groupSize).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
dfs.getClient().setErasureCodingPolicy("/", ErasureCodingPolicyManager
.getSystemDefaultPolicy().getName());
dfs.getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
@@ -73,11 +74,11 @@ public class TestEnabledECPolicies {
// Test first with an invalid policy
expectInvalidPolicy("not-a-policy");
// Test with an invalid policy and a valid policy
expectInvalidPolicy("not-a-policy," + ErasureCodingPolicyManager
.getSystemDefaultPolicy().getName());
expectInvalidPolicy("not-a-policy," +
StripedFileTestUtil.getDefaultECPolicy().getName());
// Test with a valid and an invalid policy
expectInvalidPolicy(ErasureCodingPolicyManager
.getSystemDefaultPolicy().getName() + ", not-a-policy");
expectInvalidPolicy(
StripedFileTestUtil.getDefaultECPolicy().getName() + ", not-a-policy");
// Some more invalid values
expectInvalidPolicy("not-a-policy, ");
expectInvalidPolicy(" ,not-a-policy, ");
@@ -85,8 +86,7 @@
@Test
public void testValid() throws Exception {
String ecPolicyName = ErasureCodingPolicyManager.getSystemDefaultPolicy()
.getName();
String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy().getName();
expectValidPolicy(ecPolicyName, 1);
expectValidPolicy(ecPolicyName + ", ", 1);
expectValidPolicy(",", 0);

@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -99,7 +100,7 @@ public class TestFSEditLogLoader {
private static final int NUM_DATA_NODES = 0;
private final ErasureCodingPolicy testECPolicy
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
= StripedFileTestUtil.getDefaultECPolicy();
@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {

@@ -676,7 +676,7 @@ public class TestFsck {
setNumFiles(4).build();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
final int dataBlocks = ecPolicy.getNumDataUnits();
final int cellSize = ecPolicy.getCellSize();
final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits();
@@ -1997,10 +1997,9 @@ public class TestFsck {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
int dataBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumDataUnits();
int parityBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumParityUnits();
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
int parityBlocks =
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int totalSize = dataBlocks + parityBlocks;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
fs = cluster.getFileSystem();
@@ -2288,12 +2287,10 @@ public class TestFsck {
@Test (timeout = 300000)
public void testFsckCorruptECFile() throws Exception {
DistributedFileSystem fs = null;
int dataBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumDataUnits();
int parityBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumParityUnits();
int cellSize = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getCellSize();
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
int parityBlocks =
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
int totalSize = dataBlocks + parityBlocks;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalSize).build();
@@ -2308,7 +2305,7 @@ public class TestFsck {
Path ecDirPath = new Path("/striped");
fs.mkdir(ecDirPath, FsPermission.getDirDefault());
fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
Path file = new Path(ecDirPath, "corrupted");
final int length = cellSize * dataBlocks;
final byte[] bytes = StripedFileTestUtil.generateBytes(length);
@@ -2359,12 +2356,10 @@ public class TestFsck {
@Test (timeout = 300000)
public void testFsckMissingECFile() throws Exception {
DistributedFileSystem fs = null;
int dataBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumDataUnits();
int parityBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumParityUnits();
int cellSize = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getCellSize();
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
int parityBlocks =
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
int totalSize = dataBlocks + parityBlocks;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalSize).build();
@@ -2374,7 +2369,7 @@ public class TestFsck {
Path ecDirPath = new Path("/striped");
fs.mkdir(ecDirPath, FsPermission.getDirDefault());
fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
Path file = new Path(ecDirPath, "missing");
final int length = cellSize * dataBlocks;
final byte[] bytes = StripedFileTestUtil.generateBytes(length);

@@ -725,12 +725,10 @@ public class TestNameNodeMXBean {
DistributedFileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
int dataBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumDataUnits();
int parityBlocks = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getNumParityUnits();
int cellSize = ErasureCodingPolicyManager
.getSystemDefaultPolicy().getCellSize();
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
int parityBlocks =
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
int totalSize = dataBlocks + parityBlocks;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalSize).build();
@@ -740,7 +738,7 @@ public class TestNameNodeMXBean {
Path ecDirPath = new Path("/striped");
fs.mkdir(ecDirPath, FsPermission.getDirDefault());
fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
Path file = new Path(ecDirPath, "corrupted");
final int length = cellSize * dataBlocks;
final byte[] bytes = StripedFileTestUtil.generateBytes(length);

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -46,7 +47,7 @@ public class TestQuotaWithStripedBlocks {
private static final int BLOCK_SIZE = 1024 * 1024;
private static final long DISK_QUOTA = BLOCK_SIZE * 10;
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocsk = ecPolicy.getNumParityUnits();
private final int groupSize = dataBlocks + parityBlocsk;

@@ -60,7 +60,7 @@ public class TestReconstructStripedBlocks {
public static final Logger LOG = LoggerFactory.getLogger(
TestReconstructStripedBlocks.class);
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private final int cellSize = ecPolicy.getCellSize();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
@@ -202,7 +202,7 @@ public class TestReconstructStripedBlocks {
DistributedFileSystem fs = cluster.getFileSystem();
BlockManager bm = cluster.getNamesystem().getBlockManager();
fs.getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
int fileLen = dataBlocks * blockSize;
Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
final byte[] data = new byte[fileLen];
@@ -268,7 +268,7 @@ public class TestReconstructStripedBlocks {
try {
fs.mkdirs(dirPath);
fs.setErasureCodingPolicy(dirPath,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
DFSTestUtil.createFile(fs, filePath,
cellSize * dataBlocks * 2, (short) 1, 0L);

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -321,7 +322,7 @@ public class TestStripedINodeFile {
// set erasure coding policy
dfs.setErasureCodingPolicy(ecDir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
final FSDirectory fsd = fsn.getFSDirectory();
@@ -423,7 +424,7 @@ public class TestStripedINodeFile {
client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set an EC policy on "/foo" directory
client.setErasureCodingPolicy(fooDir,
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
// write file to fooDir
final String barFile = "/foo/bar";

@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -46,7 +46,7 @@ import org.junit.Test;
public class TestOfflineImageViewerWithStripedBlocks {
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
StripedFileTestUtil.getDefaultECPolicy();
private int dataBlocks = ecPolicy.getNumDataUnits();
private int parityBlocks = ecPolicy.getNumParityUnits();
@@ -64,7 +64,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
Path eczone = new Path("/eczone");
fs.mkdirs(eczone);
@@ -144,7 +144,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
// Verify space consumed present in BlockInfoStriped
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
fileNode.getErasureCodingPolicyID());
assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
long actualFileSize = 0;