HDFS-8408. Revisit and refactor ErasureCodingInfo (Contributed by Vinayakumar B)

Vinayakumar B 2015-05-25 11:57:17 +05:30 committed by Zhe Zhang
parent b30e96bfb4
commit 9a18598e2d
21 changed files with 117 additions and 337 deletions

View File: CHANGES-HDFS-EC-7285.txt

@@ -257,3 +257,5 @@
HDFS-7768. Change fsck to support EC files. (Takanobu Asanuma via szetszwo)
HDFS-8382. Remove chunkSize and initialize from erasure coder. (Kai Zheng)
HDFS-8408. Revisit and refactor ErasureCodingInfo (vinayakumarb)

View File: DFSClient.java

@@ -119,8 +119,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3137,19 +3136,6 @@ public void checkAccess(String src, FsAction mode) throws IOException {
}
}
public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getErasureCodingInfo", src);
try {
return namenode.getErasureCodingInfo(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class, UnresolvedPathException.class);
} finally {
scope.close();
}
}
public ECSchema[] getECSchemas() throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
@@ -3359,11 +3345,11 @@ TraceScope getSrcDstTraceScope(String description, String src, String dst) {
* @return Returns the zone information if path is in EC Zone, null otherwise
* @throws IOException
*/
public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getErasureCodingZoneInfo", src);
TraceScope scope = getPathTraceScope("getErasureCodingZone", src);
try {
return namenode.getErasureCodingZoneInfo(src);
return namenode.getErasureCodingZone(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class, UnresolvedPathException.class);

View File: DistributedFileSystem.java

@@ -75,7 +75,7 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -2316,25 +2316,25 @@ public Void next(final FileSystem fs, final Path p) throws IOException {
* @return Returns the zone information if path is in EC zone, null otherwise
* @throws IOException
*/
public ErasureCodingZoneInfo getErasureCodingZoneInfo(final Path path)
public ErasureCodingZone getErasureCodingZone(final Path path)
throws IOException {
Path absF = fixRelativePart(path);
return new FileSystemLinkResolver<ErasureCodingZoneInfo>() {
return new FileSystemLinkResolver<ErasureCodingZone>() {
@Override
public ErasureCodingZoneInfo doCall(final Path p) throws IOException,
public ErasureCodingZone doCall(final Path p) throws IOException,
UnresolvedLinkException {
return dfs.getErasureCodingZoneInfo(getPathName(p));
return dfs.getErasureCodingZone(getPathName(p));
}
@Override
public ErasureCodingZoneInfo next(final FileSystem fs, final Path p)
public ErasureCodingZone next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem) fs;
return myDfs.getErasureCodingZoneInfo(p);
return myDfs.getErasureCodingZone(p);
}
throw new UnsupportedOperationException(
"Cannot getErasureCodingZoneInfo through a symlink to a "
"Cannot getErasureCodingZone through a symlink to a "
+ "non-DistributedFileSystem: " + path + " -> " + p);
}
}.resolve(this, absF);
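A minimal caller-side sketch of the renamed public API; the class name, path, and configuration below are illustrative and not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;

public class GetErasureCodingZoneExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a NameNode built from this branch.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    ErasureCodingZone zone = dfs.getErasureCodingZone(new Path("/eczone"));
    if (zone != null) {
      // The accessors carry over unchanged from the old ErasureCodingZoneInfo.
      System.out.println(zone.getDir() + ", " + zone.getSchema()
          + ", cellSize=" + zone.getCellSize());
    } else {
      System.out.println("/eczone is not in an EC zone");
    }
  }
}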

View File: ClientProtocol.java

@@ -1466,16 +1466,6 @@ public List<XAttr> listXAttrs(String src)
public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
throws IOException;
/**
* Gets the ECInfo for the specified file/directory
*
* @param src
* @return Returns the ECInfo if the file/directory is erasure coded, null otherwise
* @throws IOException
*/
@Idempotent
public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException;
/**
* Gets list of ECSchemas loaded in Namenode
*
@@ -1492,5 +1482,5 @@ public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
* @throws IOException
*/
@Idempotent
public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException;
public ErasureCodingZone getErasureCodingZone(String src) throws IOException;
}

View File: ErasureCodingInfo.java (deleted)

@@ -1,41 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.io.erasurecode.ECSchema;
/**
* Class to provide information, such as ECSchema, for a file/block.
*/
public class ErasureCodingInfo {
private final String src;
private final ECSchema schema;
public ErasureCodingInfo(String src, ECSchema schema) {
this.src = src;
this.schema = schema;
}
public String getSrc() {
return src;
}
public ECSchema getSchema() {
return schema;
}
}

View File: ErasureCodingZoneInfo.java (deleted)

@@ -1,66 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.io.erasurecode.ECSchema;
/**
* Information about the EC Zone at the specified path.
*/
public class ErasureCodingZoneInfo {
private String dir;
private ECSchema schema;
private int cellSize;
public ErasureCodingZoneInfo(String dir, ECSchema schema, int cellSize) {
this.dir = dir;
this.schema = schema;
this.cellSize = cellSize;
}
/**
* Get directory of the EC zone.
*
* @return
*/
public String getDir() {
return dir;
}
/**
* Get the schema for the EC Zone
*
* @return
*/
public ECSchema getSchema() {
return schema;
}
/**
* Get cellSize for the EC Zone
*/
public int getCellSize() {
return cellSize;
}
@Override
public String toString() {
return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
+ cellSize;
}
}
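The renamed replacement class does not appear in this view; a minimal sketch of ErasureCodingZone, inferred from the constructor and accessor calls in the PBHelper, ErasureCodingZoneManager, and BlockManager hunks below, is field-for-field identical to the deleted ErasureCodingZoneInfo:

package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.io.erasurecode.ECSchema;

/**
 * Information about the EC zone at the specified path.
 */
public class ErasureCodingZone {
  private String dir;
  private ECSchema schema;
  private int cellSize;

  public ErasureCodingZone(String dir, ECSchema schema, int cellSize) {
    this.dir = dir;
    this.schema = schema;
    this.cellSize = cellSize;
  }

  /** Get the directory of the EC zone. */
  public String getDir() {
    return dir;
  }

  /** Get the schema for the EC zone. */
  public ECSchema getSchema() {
    return schema;
  }

  /** Get the cell size for the EC zone. */
  public int getCellSize() {
    return cellSize;
  }

  @Override
  public String toString() {
    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
        + cellSize;
  }
}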

View File: ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -35,8 +35,7 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -202,10 +201,8 @@
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -1524,22 +1521,6 @@ public GetEditsFromTxidResponseProto getEditsFromTxid(RpcController controller,
}
}
@Override
public GetErasureCodingInfoResponseProto getErasureCodingInfo(RpcController controller,
GetErasureCodingInfoRequestProto request) throws ServiceException {
try {
ErasureCodingInfo ecInfo = server.getErasureCodingInfo(request.getSrc());
GetErasureCodingInfoResponseProto.Builder resBuilder = GetErasureCodingInfoResponseProto
.newBuilder();
if (ecInfo != null) {
resBuilder.setECInfo(PBHelper.convertECInfo(ecInfo));
}
return resBuilder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetECSchemasResponseProto getECSchemas(RpcController controller,
GetECSchemasRequestProto request) throws ServiceException {
@@ -1557,13 +1538,13 @@ public GetECSchemasResponseProto getECSchemas(RpcController controller,
}
@Override
public GetErasureCodingZoneInfoResponseProto getErasureCodingZoneInfo(RpcController controller,
GetErasureCodingZoneInfoRequestProto request) throws ServiceException {
public GetErasureCodingZoneResponseProto getErasureCodingZone(RpcController controller,
GetErasureCodingZoneRequestProto request) throws ServiceException {
try {
ErasureCodingZoneInfo ecZoneInfo = server.getErasureCodingZoneInfo(request.getSrc());
GetErasureCodingZoneInfoResponseProto.Builder builder = GetErasureCodingZoneInfoResponseProto.newBuilder();
if (ecZoneInfo != null) {
builder.setECZoneInfo(PBHelper.convertECZoneInfo(ecZoneInfo));
ErasureCodingZone ecZone = server.getErasureCodingZone(request.getSrc());
GetErasureCodingZoneResponseProto.Builder builder = GetErasureCodingZoneResponseProto.newBuilder();
if (ecZone != null) {
builder.setECZone(PBHelper.convertErasureCodingZone(ecZone));
}
return builder.build();
} catch (IOException e) {

View File: ClientNamenodeProtocolTranslatorPB.java

@@ -58,8 +58,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -168,10 +167,8 @@
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
@@ -1552,21 +1549,6 @@ public EventBatchList getEditsFromTxid(long txid) throws IOException {
}
}
@Override
public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
GetErasureCodingInfoRequestProto req = GetErasureCodingInfoRequestProto.newBuilder()
.setSrc(src).build();
try {
GetErasureCodingInfoResponseProto res = rpcProxy.getErasureCodingInfo(null, req);
if (res.hasECInfo()) {
return PBHelper.convertECInfo(res.getECInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public ECSchema[] getECSchemas() throws IOException {
try {
@@ -1584,14 +1566,14 @@ public ECSchema[] getECSchemas() throws IOException {
}
@Override
public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
GetErasureCodingZoneInfoRequestProto req = GetErasureCodingZoneInfoRequestProto.newBuilder()
public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
GetErasureCodingZoneRequestProto req = GetErasureCodingZoneRequestProto.newBuilder()
.setSrc(src).build();
try {
GetErasureCodingZoneInfoResponseProto response = rpcProxy.getErasureCodingZoneInfo(
GetErasureCodingZoneResponseProto response = rpcProxy.getErasureCodingZone(
null, req);
if (response.hasECZoneInfo()) {
return PBHelper.convertECZoneInfo(response.getECZoneInfo());
if (response.hasECZone()) {
return PBHelper.convertErasureCodingZone(response.getECZone());
}
return null;
} catch (ServiceException e) {

View File: PBHelper.java

@@ -77,13 +77,12 @@
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -135,8 +134,7 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
@@ -203,7 +201,6 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand;
@@ -3123,16 +3120,6 @@ public static BlockReportContextProto convert(BlockReportContext context) {
build();
}
public static ErasureCodingInfo convertECInfo(ErasureCodingInfoProto ecInfoProto) {
return new ErasureCodingInfo(ecInfoProto.getSrc(),
convertECSchema(ecInfoProto.getSchema()));
}
public static ErasureCodingInfoProto convertECInfo(ErasureCodingInfo ecInfo) {
return ErasureCodingInfoProto.newBuilder().setSrc(ecInfo.getSrc())
.setSchema(convertECSchema(ecInfo.getSchema())).build();
}
public static ECSchema convertECSchema(ECSchemaProto schema) {
List<ECSchemaOptionEntryProto> optionsList = schema.getOptionsList();
Map<String, String> options = new HashMap<>(optionsList.size());
@@ -3157,16 +3144,17 @@ public static ECSchemaProto convertECSchema(ECSchema schema) {
return builder.build();
}
public static ErasureCodingZoneInfoProto convertECZoneInfo(ErasureCodingZoneInfo ecZoneInfo) {
return ErasureCodingZoneInfoProto.newBuilder().setDir(ecZoneInfo.getDir())
.setSchema(convertECSchema(ecZoneInfo.getSchema()))
.setCellSize(ecZoneInfo.getCellSize()).build();
public static ErasureCodingZoneProto convertErasureCodingZone(
ErasureCodingZone ecZone) {
return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
.setSchema(convertECSchema(ecZone.getSchema()))
.setCellSize(ecZone.getCellSize()).build();
}
public static ErasureCodingZoneInfo convertECZoneInfo(ErasureCodingZoneInfoProto ecZoneInfoProto) {
return new ErasureCodingZoneInfo(ecZoneInfoProto.getDir(),
convertECSchema(ecZoneInfoProto.getSchema()),
ecZoneInfoProto.getCellSize());
public static ErasureCodingZone convertErasureCodingZone(
ErasureCodingZoneProto ecZoneProto) {
return new ErasureCodingZone(ecZoneProto.getDir(),
convertECSchema(ecZoneProto.getSchema()), ecZoneProto.getCellSize());
}
public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
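A round-trip sketch of the renamed converters; only the convertErasureCodingZone signatures above are taken from this commit, while the schema and cell size values are placeholders:

import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class ConvertZoneRoundTrip {
  public static void main(String[] args) {
    ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
    ErasureCodingZone zone = new ErasureCodingZone("/eczone", schema, 64 * 1024);
    // Java object -> protobuf message, as done on the NameNode side.
    ErasureCodingZoneProto proto = PBHelper.convertErasureCodingZone(zone);
    // Protobuf message -> Java object, as done on the client side.
    ErasureCodingZone decoded = PBHelper.convertErasureCodingZone(proto);
    System.out.println(decoded); // Dir: /eczone, Schema: ..., cellSize: 65536
  }
}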

View File: BlockManager.java

@@ -51,7 +51,7 @@
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1555,14 +1555,14 @@ int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
assert rw instanceof ErasureCodingWork;
assert rw.targets.length > 0;
String src = block.getBlockCollection().getName();
ErasureCodingZoneInfo ecZoneInfo = null;
ErasureCodingZone ecZone = null;
try {
ecZoneInfo = namesystem.getErasureCodingZoneInfoForPath(src);
ecZone = namesystem.getErasureCodingZoneForPath(src);
} catch (IOException e) {
blockLog
.warn("Failed to get the EC zone info for the file {} ", src);
.warn("Failed to get the EC zone for the file {} ", src);
}
if (ecZoneInfo == null) {
if (ecZone == null) {
blockLog.warn("No EC schema found for the file {}. "
+ "So cannot proceed for recovery", src);
// TODO: we may have to revisit later for what we can do better to
@@ -1573,7 +1573,7 @@ int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
new ExtendedBlock(namesystem.getBlockPoolId(), block),
rw.srcNodes, rw.targets,
((ErasureCodingWork) rw).liveBlockIndicies,
ecZoneInfo.getSchema(), ecZoneInfo.getCellSize());
ecZone.getSchema(), ecZone.getCellSize());
} else {
rw.srcNodes[0].addBlockToBeReplicated(block, targets);
}

View File: ErasureCodingZoneManager.java

@@ -23,7 +23,7 @@
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableUtils;
@@ -61,11 +61,11 @@ public ErasureCodingZoneManager(FSDirectory dir) {
}
ECSchema getECSchema(INodesInPath iip) throws IOException {
ErasureCodingZoneInfo ecZoneInfo = getECZoneInfo(iip);
return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
ErasureCodingZone ecZone = getECZone(iip);
return ecZone == null ? null : ecZone.getSchema();
}
ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
ErasureCodingZone getECZone(INodesInPath iip) throws IOException {
assert dir.hasReadLock();
Preconditions.checkNotNull(iip);
List<INode> inodes = iip.getReadOnlyINodes();
@@ -92,7 +92,7 @@ ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
String schemaName = WritableUtils.readString(dIn);
ECSchema schema = dir.getFSNamesystem().getECSchemaManager()
.getSchema(schemaName);
return new ErasureCodingZoneInfo(dir.getInode(inode.getId())
return new ErasureCodingZone(dir.getInode(inode.getId())
.getFullPathName(), schema, cellSize);
}
}

View File: FSDirStatAndListingOp.java

@@ -29,7 +29,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -386,9 +386,9 @@ static HdfsFileStatus createFileStatus(
final FileEncryptionInfo feInfo = isRawPath ? null :
fsd.getFileEncryptionInfo(node, snapshot, iip);
final ErasureCodingZoneInfo ecZoneInfo = fsd.getECZoneInfo(iip);
final ECSchema schema = ecZoneInfo != null ? ecZoneInfo.getSchema() : null;
final int cellSize = ecZoneInfo != null ? ecZoneInfo.getCellSize() : 0;
final ErasureCodingZone ecZone = fsd.getECZone(iip);
final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
if (node.isFile()) {
final INodeFile fileNode = node.asFile();
@@ -468,9 +468,9 @@ private static HdfsLocatedFileStatus createLocatedFileStatus(
}
int childrenNum = node.isDirectory() ?
node.asDirectory().getChildrenNum(snapshot) : 0;
final ErasureCodingZoneInfo ecZoneInfo = fsd.getECZoneInfo(iip);
final ECSchema schema = ecZoneInfo != null ? ecZoneInfo.getSchema() : null;
final int cellSize = ecZoneInfo != null ? ecZoneInfo.getCellSize() : 0;
final ErasureCodingZone ecZone = fsd.getECZone(iip);
final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
HdfsLocatedFileStatus status =
new HdfsLocatedFileStatus(size, node.isDirectory(), replication,

View File: FSDirectory.java

@@ -42,7 +42,7 @@
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1253,10 +1253,10 @@ ECSchema getECSchema(INodesInPath iip) throws IOException {
}
}
ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
ErasureCodingZone getECZone(INodesInPath iip) throws IOException {
readLock();
try {
return ecZoneManager.getECZoneInfo(iip);
return ecZoneManager.getECZone(iip);
} finally {
readUnlock();
}

View File: FSNamesystem.java

@@ -181,8 +181,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -7623,28 +7622,16 @@ private boolean isInECZone(String src) throws IOException {
return dir.isInECZone(iip);
}
/**
* Get the erasure coding information for specified src
*/
ErasureCodingInfo getErasureCodingInfo(String src) throws AccessControlException,
UnresolvedLinkException, IOException {
ErasureCodingZoneInfo zoneInfo = getErasureCodingZoneInfo(src);
if (zoneInfo != null) {
return new ErasureCodingInfo(src, zoneInfo.getSchema());
}
return null;
}
/**
* Get the erasure coding zone information for specified path
*/
ErasureCodingZoneInfo getErasureCodingZoneInfo(String src)
ErasureCodingZone getErasureCodingZone(String src)
throws AccessControlException, UnresolvedLinkException, IOException {
checkOperation(OperationCategory.READ);
readLock();
try {
checkOperation(OperationCategory.READ);
return getErasureCodingZoneInfoForPath(src);
return getErasureCodingZoneForPath(src);
} finally {
readUnlock();
}
@@ -7865,7 +7852,7 @@ private static void enableAsyncAuditLog() {
}
@Override
public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src)
public ErasureCodingZone getErasureCodingZoneForPath(String src)
throws IOException {
final byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(src);
@@ -7875,7 +7862,7 @@ public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src)
if (isPermissionEnabled) {
dir.checkPathAccess(pc, iip, FsAction.READ);
}
return dir.getECZoneInfo(iip);
return dir.getECZone(iip);
}
}

View File: NameNodeRpcServer.java

@@ -84,8 +84,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
@@ -2035,12 +2034,6 @@ public void removeSpanReceiver(long id) throws IOException {
nn.spanReceiverHost.removeSpanReceiver(id);
}
@Override // ClientProtocol
public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
checkNNStartup();
return namesystem.getErasureCodingInfo(src);
}
@Override // ClientProtocol
public ECSchema[] getECSchemas() throws IOException {
checkNNStartup();
@@ -2048,8 +2041,8 @@ public ECSchema[] getECSchemas() throws IOException {
}
@Override // ClientProtocol
public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
checkNNStartup();
return namesystem.getErasureCodingZoneInfo(src);
return namesystem.getErasureCodingZone(src);
}
}

View File: Namesystem.java

@@ -21,11 +21,10 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.util.RwLock;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.AccessControlException;
@@ -52,13 +51,12 @@ public interface Namesystem extends RwLock, SafeMode {
public boolean isInSnapshot(BlockCollection bc);
/**
* Gets the ECZone info for path
*
* Gets the ECZone for path
* @param src
* - path
* @return {@link ErasureCodingZoneInfo}
* @return {@link ErasureCodingZone}
* @throws IOException
*/
public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src)
public ErasureCodingZone getErasureCodingZoneForPath(String src)
throws IOException;
}

View File: ECCommand.java

@@ -30,7 +30,7 @@
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.io.erasurecode.ECSchema;
@@ -47,8 +47,8 @@ public static void registerCommands(CommandFactory factory) {
// Register all commands of Erasure CLI, with a '-' at the beginning in name
// of the command.
factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
factory.addClass(GetECZoneInfoCommand.class, "-"
+ GetECZoneInfoCommand.NAME);
factory.addClass(GetECZoneCommand.class, "-"
+ GetECZoneCommand.NAME);
factory.addClass(ListECSchemas.class, "-" + ListECSchemas.NAME);
}
@@ -153,8 +153,8 @@ protected void processPath(PathData item) throws IOException {
/**
* Get the information about the zone
*/
static class GetECZoneInfoCommand extends ECCommand {
public static final String NAME = "getZoneInfo";
static class GetECZoneCommand extends ECCommand {
public static final String NAME = "getZone";
public static final String USAGE = "<path>";
public static final String DESCRIPTION =
"Get information about the EC zone at specified path\n";
@@ -174,9 +174,9 @@ protected void processPath(PathData item) throws IOException {
super.processPath(item);
DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
try {
ErasureCodingZoneInfo ecZoneInfo = dfs.getErasureCodingZoneInfo(item.path);
if (ecZoneInfo != null) {
out.println(ecZoneInfo.toString());
ErasureCodingZone ecZone = dfs.getErasureCodingZone(item.path);
if (ecZone != null) {
out.println(ecZone.toString());
} else {
out.println("Path " + item.path + " is not in EC zone");
}

View File: ClientNamenodeProtocol.proto

@@ -863,10 +863,8 @@ service ClientNamenodeProtocol {
returns(GetCurrentEditLogTxidResponseProto);
rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
returns(GetEditsFromTxidResponseProto);
rpc getErasureCodingInfo(GetErasureCodingInfoRequestProto)
returns(GetErasureCodingInfoResponseProto);
rpc getECSchemas(GetECSchemasRequestProto)
returns(GetECSchemasResponseProto);
rpc getErasureCodingZoneInfo(GetErasureCodingZoneInfoRequestProto)
returns(GetErasureCodingZoneInfoResponseProto);
rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
returns(GetErasureCodingZoneResponseProto);
}

View File: erasurecoding.proto

@@ -24,17 +24,9 @@ package hadoop.hdfs;
import "hdfs.proto";
/**
* ErasureCodingInfo
* ErasureCodingZone
*/
message ErasureCodingInfoProto {
required string src = 1;
required ECSchemaProto schema = 2;
}
/**
* ErasureCodingZoneInfo
*/
message ErasureCodingZoneInfoProto {
message ErasureCodingZoneProto {
required string dir = 1;
required ECSchemaProto schema = 2;
required uint32 cellSize = 3;
@@ -49,14 +41,6 @@ message CreateErasureCodingZoneRequestProto {
message CreateErasureCodingZoneResponseProto {
}
message GetErasureCodingInfoRequestProto {
required string src = 1;
}
message GetErasureCodingInfoResponseProto {
optional ErasureCodingInfoProto ECInfo = 1;
}
message GetECSchemasRequestProto { // void request
}
@@ -64,12 +48,12 @@ message GetECSchemasResponseProto {
repeated ECSchemaProto schemas = 1;
}
message GetErasureCodingZoneInfoRequestProto {
message GetErasureCodingZoneRequestProto {
required string src = 1; // path to get the zone info
}
message GetErasureCodingZoneInfoResponseProto {
optional ErasureCodingZoneInfoProto ECZoneInfo = 1;
message GetErasureCodingZoneResponseProto {
optional ErasureCodingZoneProto ECZone = 1;
}
/**

View File: TestErasureCodingZones.java

@@ -21,7 +21,7 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -177,12 +177,12 @@ public void testGetErasureCodingInfoWithSystemDefaultSchema() throws Exception {
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
assertNull(fs.getClient().getErasureCodingInfo(src));
assertNull(fs.getClient().getFileInfo(src).getECSchema());
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, null, 0); //Default one will be used.
ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
verifyErasureCodingInfo(src, sysDefaultSchema);
fs.create(new Path(ecDir, "/child1")).close();
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
verifyErasureCodingInfo(src + "/child1", sysDefaultSchema);
}
@@ -198,21 +198,19 @@ public void testGetErasureCodingInfo() throws Exception {
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
assertNull(fs.getClient().getErasureCodingInfo(src));
assertNull(fs.getClient().getFileInfo(src).getECSchema());
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, usingSchema, 0);
verifyErasureCodingInfo(src, usingSchema);
fs.create(new Path(ecDir, "/child1")).close();
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
verifyErasureCodingInfo(src + "/child1", usingSchema);
}
private void verifyErasureCodingInfo(
String src, ECSchema usingSchema) throws IOException {
ErasureCodingInfo ecInfo = fs.getClient().getErasureCodingInfo(src);
assertNotNull("ECInfo should have been non-null", ecInfo);
assertEquals(src, ecInfo.getSrc());
ECSchema schema = ecInfo.getSchema();
HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
ECSchema schema = hdfsFileStatus.getECSchema();
assertNotNull(schema);
assertEquals("Actually used schema should be equal with target schema",
usingSchema, schema);
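As the updated test shows, the per-file schema that the removed getErasureCodingInfo call used to return is now read from the file status instead; a migration sketch, with a hypothetical helper name:

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class SchemaFromStatus {
  /** Replaces the removed DFSClient#getErasureCodingInfo(src) lookup. */
  static ECSchema schemaOf(DFSClient client, String src) throws IOException {
    HdfsFileStatus status = client.getFileInfo(src);
    // Null when the path does not exist or is not erasure coded.
    return status == null ? null : status.getECSchema();
  }
}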

View File: testErasureCodingConf.xml

@@ -67,9 +67,9 @@
</test>
<test>
<description>help: getZoneInfo command</description>
<description>help: getZone command</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -help getZoneInfo</ec-admin-command>
<ec-admin-command>-fs NAMENODE -help getZone</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
@@ -80,7 +80,7 @@
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-getZoneInfo &lt;path&gt;(.)*</expected-output>
<expected-output>^-getZone &lt;path&gt;(.)*</expected-output>
</comparator>
</comparators>
</test>
@@ -145,7 +145,7 @@
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZoneInfo /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>
@@ -159,10 +159,10 @@
</test>
<test>
<description>getZoneInfo : get information about the EC zone at specified path not in zone</description>
<description>getZone : get information about the EC zone at specified path not in zone</description>
<test-commands>
<command>-fs NAMENODE -mkdir /noec</command>
<ec-admin-command>-fs NAMENODE -getZoneInfo /noec</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /noec</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /noec</command>
@@ -176,11 +176,11 @@
</test>
<test>
<description>getZoneInfo : get information about the EC zone at specified path</description>
<description>getZone : get information about the EC zone at specified path</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZoneInfo /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>
@@ -194,12 +194,12 @@
</test>
<test>
<description>getZoneInfo : get EC zone at specified file path</description>
<description>getZone : get EC zone at specified file path</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
<command>-fs NAMENODE -touchz /eczone/ecfile</command>
<ec-admin-command>-fs NAMENODE -getZoneInfo /eczone/ecfile</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /eczone/ecfile</command>
@@ -313,24 +313,24 @@
</test>
<test>
<description>getZoneInfo : illegal parameters - path is missing</description>
<description>getZone : illegal parameters - path is missing</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -getZoneInfo </ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone </ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-getZoneInfo: &lt;path&gt; is missing(.)*</expected-output>
<expected-output>^-getZone: &lt;path&gt; is missing(.)*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getZoneInfo : illegal parameters - too many arguments</description>
<description>getZone : illegal parameters - too many arguments</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -getZoneInfo /eczone /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /eczone</command>
@@ -338,22 +338,22 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>-getZoneInfo: Too many arguments</expected-output>
<expected-output>-getZone: Too many arguments</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getZoneInfo : illegal parameters - no such file</description>
<description>getZone : illegal parameters - no such file</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -getZoneInfo /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^getZoneInfo: `/eczone': No such file or directory(.)*</expected-output>
<expected-output>^getZone: `/eczone': No such file or directory(.)*</expected-output>
</comparator>
</comparators>
</test>