HDFS-8129. Erasure Coding: Maintain consistent naming for Erasure Coding related classes - EC/ErasureCoding. Contributed by Uma Maheswara Rao G

Uma Maheswara Rao G 2015-05-07 16:26:01 +05:30 committed by Zhe Zhang
parent cea46f79b0
commit ac97edd1ab
23 changed files with 110 additions and 107 deletions

View File

@@ -183,3 +183,6 @@
HDFS-8334. Erasure coding: rename DFSStripedInputStream related test
classes. (Zhe Zhang)
+HDFS-8129. Erasure Coding: Maintain consistent naming for Erasure Coding related classes - EC/ErasureCoding
+(umamahesh)

View File

@@ -119,8 +119,8 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -1193,7 +1193,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
// Get block info from namenode
TraceScope scope = getPathTraceScope("newDFSInputStream", src);
try {
-ECInfo info = getErasureCodingInfo(src);
+ErasureCodingInfo info = getErasureCodingInfo(src);
if (info != null) {
return new DFSStripedInputStream(this, src, verifyChecksum, info);
} else {
@@ -3134,7 +3134,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
-public ECInfo getErasureCodingInfo(String src) throws IOException {
+public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getErasureCodingInfo", src);
try {
@@ -3356,7 +3356,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
* @return Returns the zone information if path is in EC Zone, null otherwise
* @throws IOException
*/
-public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
+public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getErasureCodingZoneInfo", src);
try {

View File

@@ -127,7 +127,7 @@ public class DFSStripedInputStream extends DFSInputStream {
private final CompletionService<Integer> readingService;
DFSStripedInputStream(DFSClient dfsClient, String src, boolean verifyChecksum,
-ECInfo ecInfo) throws IOException {
+ErasureCodingInfo ecInfo) throws IOException {
super(dfsClient, src, verifyChecksum);
// ECInfo is restored from NN just before reading striped file.
assert ecInfo != null;

View File

@@ -75,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -2315,18 +2315,18 @@ public class DistributedFileSystem extends FileSystem {
* @return Returns the zone information if path is in EC zone, null otherwise
* @throws IOException
*/
-public ECZoneInfo getErasureCodingZoneInfo(final Path path)
+public ErasureCodingZoneInfo getErasureCodingZoneInfo(final Path path)
throws IOException {
Path absF = fixRelativePart(path);
-return new FileSystemLinkResolver<ECZoneInfo>() {
+return new FileSystemLinkResolver<ErasureCodingZoneInfo>() {
@Override
-public ECZoneInfo doCall(final Path p) throws IOException,
+public ErasureCodingZoneInfo doCall(final Path p) throws IOException,
UnresolvedLinkException {
return dfs.getErasureCodingZoneInfo(getPathName(p));
}
@Override
-public ECZoneInfo next(final FileSystem fs, final Path p)
+public ErasureCodingZoneInfo next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem) fs;
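For context, a minimal client-side sketch of the renamed zone-info API as it looks after this patch. The GetZoneInfoExample class, the /striped path, and the printing are illustrative assumptions; getErasureCodingZoneInfo, getDir, and getSchema are the methods shown in the hunks above and below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;

public class GetZoneInfoExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster with an EC zone at /striped.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      ErasureCodingZoneInfo zone = dfs.getErasureCodingZoneInfo(new Path("/striped"));
      if (zone != null) {
        // getDir() and getSchema() are the accessors this patch's PBHelper uses.
        System.out.println("EC zone dir: " + zone.getDir());
        System.out.println("EC schema: " + zone.getSchema());
      } else {
        System.out.println("/striped is not inside an erasure coding zone");
      }
    }
  }
}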

View File

@@ -1474,7 +1474,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
-public ECInfo getErasureCodingInfo(String src) throws IOException;
+public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException;
/**
* Gets list of ECSchemas loaded in Namenode
@@ -1492,5 +1492,5 @@
* @throws IOException
*/
@Idempotent
-public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException;
+public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException;
}

View File

@@ -22,11 +22,11 @@ import org.apache.hadoop.io.erasurecode.ECSchema;
/**
* Class to provide information, such as ECSchema, for a file/block.
*/
-public class ECInfo {
+public class ErasureCodingInfo {
private final String src;
private final ECSchema schema;
-public ECInfo(String src, ECSchema schema) {
+public ErasureCodingInfo(String src, ECSchema schema) {
this.src = src;
this.schema = schema;
}

View File

@@ -21,12 +21,12 @@ import org.apache.hadoop.io.erasurecode.ECSchema;
/**
* Information about the EC Zone at the specified path.
*/
-public class ECZoneInfo {
+public class ErasureCodingZoneInfo {
private String dir;
private ECSchema schema;
-public ECZoneInfo(String dir, ECSchema schema) {
+public ErasureCodingZoneInfo(String dir, ECSchema schema) {
this.dir = dir;
this.schema = schema;
}

View File

@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -202,8 +202,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptio
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -1527,7 +1527,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
public GetErasureCodingInfoResponseProto getErasureCodingInfo(RpcController controller,
GetErasureCodingInfoRequestProto request) throws ServiceException {
try {
-ECInfo ecInfo = server.getErasureCodingInfo(request.getSrc());
+ErasureCodingInfo ecInfo = server.getErasureCodingInfo(request.getSrc());
GetErasureCodingInfoResponseProto.Builder resBuilder = GetErasureCodingInfoResponseProto
.newBuilder();
if (ecInfo != null) {
@@ -1556,11 +1556,11 @@
}
@Override
-public GetECZoneInfoResponseProto getErasureCodingZoneInfo(RpcController controller,
+public GetErasureCodingZoneInfoResponseProto getErasureCodingZoneInfo(RpcController controller,
-GetECZoneInfoRequestProto request) throws ServiceException {
+GetErasureCodingZoneInfoRequestProto request) throws ServiceException {
try {
-ECZoneInfo ecZoneInfo = server.getErasureCodingZoneInfo(request.getSrc());
+ErasureCodingZoneInfo ecZoneInfo = server.getErasureCodingZoneInfo(request.getSrc());
-GetECZoneInfoResponseProto.Builder builder = GetECZoneInfoResponseProto.newBuilder();
+GetErasureCodingZoneInfoResponseProto.Builder builder = GetErasureCodingZoneInfoResponseProto.newBuilder();
if (ecZoneInfo != null) {
builder.setECZoneInfo(PBHelper.convertECZoneInfo(ecZoneInfo));
}

View File

@@ -58,8 +58,8 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -168,8 +168,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -1550,7 +1550,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
-public ECInfo getErasureCodingInfo(String src) throws IOException {
+public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
GetErasureCodingInfoRequestProto req = GetErasureCodingInfoRequestProto.newBuilder()
.setSrc(src).build();
try {
@@ -1581,11 +1581,11 @@
}
@Override
-public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
+public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
-GetECZoneInfoRequestProto req = GetECZoneInfoRequestProto.newBuilder()
+GetErasureCodingZoneInfoRequestProto req = GetErasureCodingZoneInfoRequestProto.newBuilder()
.setSrc(src).build();
try {
-GetECZoneInfoResponseProto response = rpcProxy.getErasureCodingZoneInfo(
+GetErasureCodingZoneInfoResponseProto response = rpcProxy.getErasureCodingZoneInfo(
null, req);
if (response.hasECZoneInfo()) {
return PBHelper.convertECZoneInfo(response.getECZoneInfo());

View File

@@ -77,13 +77,13 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -135,10 +135,10 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterComm
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECSchemaOptionEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECSchemaProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECZoneInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
@@ -3117,13 +3117,13 @@ public class PBHelper {
build();
}
-public static ECInfo convertECInfo(ECInfoProto ecInfoProto) {
+public static ErasureCodingInfo convertECInfo(ErasureCodingInfoProto ecInfoProto) {
-return new ECInfo(ecInfoProto.getSrc(),
+return new ErasureCodingInfo(ecInfoProto.getSrc(),
convertECSchema(ecInfoProto.getSchema()));
}
-public static ECInfoProto convertECInfo(ECInfo ecInfo) {
+public static ErasureCodingInfoProto convertECInfo(ErasureCodingInfo ecInfo) {
-return ECInfoProto.newBuilder().setSrc(ecInfo.getSrc())
+return ErasureCodingInfoProto.newBuilder().setSrc(ecInfo.getSrc())
.setSchema(convertECSchema(ecInfo.getSchema())).build();
}
@@ -3151,13 +3151,13 @@
return builder.build();
}
-public static ECZoneInfoProto convertECZoneInfo(ECZoneInfo ecZoneInfo) {
+public static ErasureCodingZoneInfoProto convertECZoneInfo(ErasureCodingZoneInfo ecZoneInfo) {
-return ECZoneInfoProto.newBuilder().setDir(ecZoneInfo.getDir())
+return ErasureCodingZoneInfoProto.newBuilder().setDir(ecZoneInfo.getDir())
.setSchema(convertECSchema(ecZoneInfo.getSchema())).build();
}
-public static ECZoneInfo convertECZoneInfo(ECZoneInfoProto ecZoneInfoProto) {
+public static ErasureCodingZoneInfo convertECZoneInfo(ErasureCodingZoneInfoProto ecZoneInfoProto) {
-return new ECZoneInfo(ecZoneInfoProto.getDir(),
+return new ErasureCodingZoneInfo(ecZoneInfoProto.getDir(),
convertECSchema(ecZoneInfoProto.getSchema()));
}

View File

@@ -31,7 +31,7 @@ import java.util.TreeMap;
* This class is instantiated by the FSNamesystem.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class ECSchemaManager {
+public final class ErasureCodingSchemaManager {
/**
* TODO: HDFS-8095
@@ -55,7 +55,7 @@ public final class ECSchemaManager {
*/
private final Map<String, ECSchema> activeSchemas;
-ECSchemaManager() {
+ErasureCodingSchemaManager() {
this.activeSchemas = new TreeMap<String, ECSchema>();
for (ECSchema schema : SYS_SCHEMAS) {
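For context, the static helper kept through the rename is what the updated tests below call; a minimal sketch, assuming only that the renamed class is on the classpath. The DefaultSchemaExample wrapper is hypothetical.

import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class DefaultSchemaExample {
  public static void main(String[] args) {
    // Formerly ECSchemaManager.getSystemDefaultSchema(); same behavior, new class name.
    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
    System.out.println(defaultSchema);
  }
}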

View File

@@ -22,7 +22,7 @@ import com.google.common.collect.Lists;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.io.erasurecode.ECSchema;
import java.io.IOException;
@@ -53,11 +53,11 @@ public class ErasureCodingZoneManager {
}
ECSchema getECSchema(INodesInPath iip) throws IOException {
-ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
+ErasureCodingZoneInfo ecZoneInfo = getECZoneInfo(iip);
return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
}
-ECZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
+ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
assert dir.hasReadLock();
Preconditions.checkNotNull(iip);
List<INode> inodes = iip.getReadOnlyINodes();
@@ -79,9 +79,9 @@ public class ErasureCodingZoneManager {
for (XAttr xAttr : xAttrs) {
if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
String schemaName = new String(xAttr.getValue());
-ECSchema schema = dir.getFSNamesystem().getSchemaManager()
+ECSchema schema = dir.getFSNamesystem().getECSchemaManager()
.getSchema(schemaName);
-return new ECZoneInfo(inode.getFullPathName(), schema);
+return new ErasureCodingZoneInfo(inode.getFullPathName(), schema);
}
}
}
@@ -110,7 +110,7 @@ public class ErasureCodingZoneManager {
// System default schema will be used since no specified.
if (schema == null) {
-schema = ECSchemaManager.getSystemDefaultSchema();
+schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
}
// Now persist the schema name in xattr

View File

@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1250,7 +1250,7 @@ public class FSDirectory implements Closeable {
}
}
-ECZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
+ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
readLock();
try {
return ecZoneManager.getECZoneInfo(iip);

View File

@@ -180,8 +180,8 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -428,7 +428,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
private final BlockManager blockManager;
private final SnapshotManager snapshotManager;
private final CacheManager cacheManager;
-private final ECSchemaManager schemaManager;
+private final ErasureCodingSchemaManager ecSchemaManager;
private final DatanodeStatistics datanodeStatistics;
private String nameserviceId;
@@ -608,7 +608,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
leaseManager.removeAllLeases();
snapshotManager.clearSnapshottableDirs();
cacheManager.clear();
-schemaManager.clear();
+ecSchemaManager.clear();
setImageLoaded(false);
blockManager.clear();
}
@@ -848,7 +848,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
this.dir = new FSDirectory(this, conf);
this.snapshotManager = new SnapshotManager(dir);
this.cacheManager = new CacheManager(this, conf, blockManager);
-this.schemaManager = new ECSchemaManager();
+this.ecSchemaManager = new ErasureCodingSchemaManager();
this.safeMode = new SafeModeInfo(conf);
this.topConf = new TopConf(conf);
this.auditLoggers = initAuditLoggers(conf);
@@ -6632,8 +6632,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
/** @return the schema manager. */
-public ECSchemaManager getSchemaManager() {
+public ErasureCodingSchemaManager getECSchemaManager() {
-return schemaManager;
+return ecSchemaManager;
}
@Override // NameNodeMXBean
@@ -7579,11 +7579,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Get the erasure coding information for specified src
*/
-ECInfo getErasureCodingInfo(String src) throws AccessControlException,
+ErasureCodingInfo getErasureCodingInfo(String src) throws AccessControlException,
UnresolvedLinkException, IOException {
ECSchema schema = getECSchemaForPath(src);
if (schema != null) {
-return new ECInfo(src, schema);
+return new ErasureCodingInfo(src, schema);
}
return null;
}
@@ -7591,7 +7591,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
/**
* Get the erasure coding zone information for specified path
*/
-ECZoneInfo getErasureCodingZoneInfo(String src) throws AccessControlException,
+ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws AccessControlException,
UnresolvedLinkException, IOException {
checkOperation(OperationCategory.READ);
final byte[][] pathComponents = FSDirectory
@@ -7620,7 +7620,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
readLock();
try {
checkOperation(OperationCategory.READ);
-return schemaManager.getSchemas();
+return ecSchemaManager.getSchemas();
} finally {
readUnlock();
}
@@ -7635,7 +7635,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
readLock();
try {
checkOperation(OperationCategory.READ);
-return schemaManager.getSchema(schemaName);
+return ecSchemaManager.getSchema(schemaName);
} finally {
readUnlock();
}

View File

@@ -84,8 +84,8 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
@@ -2035,7 +2035,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override // ClientProtocol
-public ECInfo getErasureCodingInfo(String src) throws IOException {
+public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
checkNNStartup();
return namesystem.getErasureCodingInfo(src);
}
@@ -2047,7 +2047,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override // ClientProtocol
-public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
+public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
checkNNStartup();
return namesystem.getErasureCodingZoneInfo(src);
}

View File

@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.util.StringUtils;
@@ -164,7 +164,7 @@ public abstract class ECCommand extends Command {
super.processPath(item);
DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
try {
-ECZoneInfo ecZoneInfo = dfs.getErasureCodingZoneInfo(item.path);
+ErasureCodingZoneInfo ecZoneInfo = dfs.getErasureCodingZoneInfo(item.path);
out.println(ecZoneInfo.toString());
} catch (IOException e) {
throw new IOException("Unable to create EC zone for the path "

View File

@@ -867,6 +867,6 @@ service ClientNamenodeProtocol {
returns(GetErasureCodingInfoResponseProto);
rpc getECSchemas(GetECSchemasRequestProto)
returns(GetECSchemasResponseProto);
-rpc getErasureCodingZoneInfo(GetECZoneInfoRequestProto)
+rpc getErasureCodingZoneInfo(GetErasureCodingZoneInfoRequestProto)
-returns(GetECZoneInfoResponseProto);
+returns(GetErasureCodingZoneInfoResponseProto);
}

View File

@@ -43,17 +43,17 @@ message ECSchemaProto {
}
/**
-* ECInfo
+* ErasureCodingInfo
*/
-message ECInfoProto {
+message ErasureCodingInfoProto {
required string src = 1;
required ECSchemaProto schema = 2;
}
/**
-* ECZoneInfo
+* ErasureCodingZoneInfo
*/
-message ECZoneInfoProto {
+message ErasureCodingZoneInfoProto {
required string dir = 1;
required ECSchemaProto schema = 2;
}
@@ -71,7 +71,7 @@ message GetErasureCodingInfoRequestProto {
}
message GetErasureCodingInfoResponseProto {
-optional ECInfoProto ECInfo = 1;
+optional ErasureCodingInfoProto ECInfo = 1;
}
message GetECSchemasRequestProto { // void request
@@ -81,12 +81,12 @@ message GetECSchemasResponseProto {
repeated ECSchemaProto schemas = 1;
}
-message GetECZoneInfoRequestProto {
+message GetErasureCodingZoneInfoRequestProto {
required string src = 1; // path to get the zone info
}
-message GetECZoneInfoResponseProto {
+message GetErasureCodingZoneInfoResponseProto {
-optional ECZoneInfoProto ECZoneInfo = 1;
+optional ErasureCodingZoneInfoProto ECZoneInfo = 1;
}
/**

View File

@@ -24,7 +24,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -34,7 +34,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.junit.After;
import org.junit.Before;
@@ -54,8 +54,8 @@ public class TestDFSStripedInputStream {
private DistributedFileSystem fs;
private final Path dirPath = new Path("/striped");
private Path filePath = new Path(dirPath, "file");
-private ECInfo info = new ECInfo(filePath.toString(),
+private ErasureCodingInfo info = new ErasureCodingInfo(filePath.toString(),
-ECSchemaManager.getSystemDefaultSchema());
+ErasureCodingSchemaManager.getSystemDefaultSchema());
private final short DATA_BLK_NUM = HdfsConstants.NUM_DATA_BLOCKS;
private final short PARITY_BLK_NUM = HdfsConstants.NUM_PARITY_BLOCKS;
private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

View File

@@ -22,7 +22,7 @@ import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.junit.After;
import org.junit.Before;

View File

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.ECInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.io.erasurecode.ECSchema;
@@ -158,7 +158,7 @@ public class TestErasureCodingZones {
assertNull(fs.getClient().getErasureCodingInfo(src));
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, null); //Default one will be used.
-ECSchema sysDefaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
verifyErasureCodingInfo(src, sysDefaultSchema);
fs.create(new Path(ecDir, "/child1")).close();
// verify for the files in ec zone
@@ -167,7 +167,7 @@ public class TestErasureCodingZones {
@Test
public void testGetErasureCodingInfo() throws Exception {
-ECSchema[] sysSchemas = ECSchemaManager.getSystemSchemas();
+ECSchema[] sysSchemas = ErasureCodingSchemaManager.getSystemSchemas();
assertTrue("System schemas should be of only 1 for now",
sysSchemas.length == 1);
@@ -187,7 +187,7 @@ public class TestErasureCodingZones {
private void verifyErasureCodingInfo(
String src, ECSchema usingSchema) throws IOException {
-ECInfo ecInfo = fs.getClient().getErasureCodingInfo(src);
+ErasureCodingInfo ecInfo = fs.getClient().getErasureCodingInfo(src);
assertNotNull("ECInfo should have been non-null", ecInfo);
assertEquals(src, ecInfo.getSrc());
ECSchema schema = ecInfo.getSchema();

View File

@@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
@@ -663,7 +663,7 @@ public class TestPBHelper {
short[] liveBlkIndices0 = new short[2];
BlockECRecoveryInfo blkECRecoveryInfo0 = new BlockECRecoveryInfo(
new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0,
-liveBlkIndices0, ECSchemaManager.getSystemDefaultSchema());
+liveBlkIndices0, ErasureCodingSchemaManager.getSystemDefaultSchema());
DatanodeInfo[] dnInfos1 = new DatanodeInfo[] {
DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil
@@ -677,7 +677,7 @@ public class TestPBHelper {
short[] liveBlkIndices1 = new short[2];
BlockECRecoveryInfo blkECRecoveryInfo1 = new BlockECRecoveryInfo(
new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1,
-liveBlkIndices1, ECSchemaManager.getSystemDefaultSchema());
+liveBlkIndices1, ErasureCodingSchemaManager.getSystemDefaultSchema());
List<BlockECRecoveryInfo> blkRecoveryInfosList = new ArrayList<BlockECRecoveryInfo>();
blkRecoveryInfosList.add(blkECRecoveryInfo0);
blkRecoveryInfosList.add(blkECRecoveryInfo1);
@@ -723,8 +723,8 @@ public class TestPBHelper {
ECSchema ecSchema2 = blkECRecoveryInfo2.getECSchema();
// Compare ECSchemas same as default ECSchema as we used system default
// ECSchema used in this test
-compareECSchemas(ECSchemaManager.getSystemDefaultSchema(), ecSchema1);
+compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema1);
-compareECSchemas(ECSchemaManager.getSystemDefaultSchema(), ecSchema2);
+compareECSchemas(ErasureCodingSchemaManager.getSystemDefaultSchema(), ecSchema2);
}
private void compareECSchemas(ECSchema ecSchema1, ECSchema ecSchema2) {

View File

@@ -60,7 +60,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedTotalBlockCount() {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
Block blk = new Block(1);
BlockInfoStriped blockInfoStriped
= new BlockInfoStriped(blk,
@@ -72,7 +72,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedLength()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk = new Block(1);
@@ -87,7 +87,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedConsumedSpace()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk = new Block(1);
@@ -116,7 +116,7 @@ public class TestStripedINodeFile {
@Test
public void testMultipleBlockStripedConsumedSpace()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk1 = new Block(1);
@@ -141,7 +141,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedFileSize()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk = new Block(1);
@@ -160,7 +160,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedUCFileSize()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk = new Block(1);
@@ -177,7 +177,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedComputeQuotaUsage()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk = new Block(1);
@@ -204,7 +204,7 @@ public class TestStripedINodeFile {
@Test
public void testBlockStripedUCComputeQuotaUsage()
throws IOException, InterruptedException {
-ECSchema defaultSchema = ECSchemaManager.getSystemDefaultSchema();
+ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
INodeFile inf = createStripedINodeFile();
inf.addStripedBlocksFeature();
Block blk = new Block(1);