HDFS-8408. Revisit and refactor ErasureCodingInfo (Contributed by Vinayakumar B)

Authored by Vinayakumar B on 2015-05-25 11:57:17 +05:30; committed by Zhe Zhang
parent b30e96bfb4
commit 9a18598e2d
21 changed files with 117 additions and 337 deletions


@@ -257,3 +257,5 @@
 HDFS-7768. Change fsck to support EC files. (Takanobu Asanuma via szetszwo)
 HDFS-8382. Remove chunkSize and initialize from erasure coder. (Kai Zheng)
+HDFS-8408. Revisit and refactor ErasureCodingInfo (vinayakumarb)
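In short: the commit removes the ErasureCodingInfo type (and its getErasureCodingInfo RPC) and renames ErasureCodingZoneInfo to ErasureCodingZone, with getErasureCodingZone as the single query call. A minimal client-side sketch of the new surface, assuming a running cluster built from this branch and a hypothetical /eczone path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class GetECZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // fs.defaultFS must point at the cluster
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    // One type now carries everything the old ErasureCodingInfo /
    // ErasureCodingZoneInfo pair did: dir, schema and cell size.
    ErasureCodingZone zone = dfs.getErasureCodingZone(new Path("/eczone"));
    if (zone == null) {
      System.out.println("/eczone is not inside an erasure coding zone");
    } else {
      ECSchema schema = zone.getSchema(); // schema in force for the zone
      System.out.println(zone + " (cellSize " + zone.getCellSize() + " bytes)");
    }
  }
}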


@@ -119,8 +119,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3137,19 +3136,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
-  public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
-    checkOpen();
-    TraceScope scope = getPathTraceScope("getErasureCodingInfo", src);
-    try {
-      return namenode.getErasureCodingInfo(src);
-    } catch (RemoteException re) {
-      throw re.unwrapRemoteException(AccessControlException.class,
-          FileNotFoundException.class, UnresolvedPathException.class);
-    } finally {
-      scope.close();
-    }
-  }
   public ECSchema[] getECSchemas() throws IOException {
     checkOpen();
     TraceScope scope = Trace.startSpan("getECSchemas", traceSampler);
@@ -3359,11 +3345,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @return Returns the zone information if path is in EC Zone, null otherwise
    * @throws IOException
    */
-  public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
+  public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getErasureCodingZoneInfo", src);
+    TraceScope scope = getPathTraceScope("getErasureCodingZone", src);
     try {
-      return namenode.getErasureCodingZoneInfo(src);
+      return namenode.getErasureCodingZone(src);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(FileNotFoundException.class,
           AccessControlException.class, UnresolvedPathException.class);


@@ -75,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -2316,25 +2316,25 @@ public class DistributedFileSystem extends FileSystem {
    * @return Returns the zone information if path is in EC zone, null otherwise
    * @throws IOException
    */
-  public ErasureCodingZoneInfo getErasureCodingZoneInfo(final Path path)
+  public ErasureCodingZone getErasureCodingZone(final Path path)
       throws IOException {
     Path absF = fixRelativePart(path);
-    return new FileSystemLinkResolver<ErasureCodingZoneInfo>() {
+    return new FileSystemLinkResolver<ErasureCodingZone>() {
       @Override
-      public ErasureCodingZoneInfo doCall(final Path p) throws IOException,
+      public ErasureCodingZone doCall(final Path p) throws IOException,
           UnresolvedLinkException {
-        return dfs.getErasureCodingZoneInfo(getPathName(p));
+        return dfs.getErasureCodingZone(getPathName(p));
       }
       @Override
-      public ErasureCodingZoneInfo next(final FileSystem fs, final Path p)
+      public ErasureCodingZone next(final FileSystem fs, final Path p)
          throws IOException {
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem myDfs = (DistributedFileSystem) fs;
-          return myDfs.getErasureCodingZoneInfo(p);
+          return myDfs.getErasureCodingZone(p);
        }
        throw new UnsupportedOperationException(
-            "Cannot getErasureCodingZoneInfo through a symlink to a "
+            "Cannot getErasureCodingZone through a symlink to a "
            + "non-DistributedFileSystem: " + path + " -> " + p);
      }
    }.resolve(this, absF);


@@ -1466,16 +1466,6 @@ public interface ClientProtocol {
   public void createErasureCodingZone(String src, ECSchema schema, int cellSize)
       throws IOException;
-  /**
-   * Gets the ECInfo for the specified file/directory
-   *
-   * @param src
-   * @return Returns the ECInfo if the file/directory is erasure coded, null otherwise
-   * @throws IOException
-   */
-  @Idempotent
-  public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException;
   /**
    * Gets list of ECSchemas loaded in Namenode
    *
@@ -1492,5 +1482,5 @@ public interface ClientProtocol {
    * @throws IOException
    */
   @Idempotent
-  public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException;
+  public ErasureCodingZone getErasureCodingZone(String src) throws IOException;
 }


@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-/**
- * Class to provide information, such as ECSchema, for a file/block.
- */
-public class ErasureCodingInfo {
-  private final String src;
-  private final ECSchema schema;
-  public ErasureCodingInfo(String src, ECSchema schema) {
-    this.src = src;
-    this.schema = schema;
-  }
-  public String getSrc() {
-    return src;
-  }
-  public ECSchema getSchema() {
-    return schema;
-  }
-}


@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-/**
- * Information about the EC Zone at the specified path.
- */
-public class ErasureCodingZoneInfo {
-  private String dir;
-  private ECSchema schema;
-  private int cellSize;
-  public ErasureCodingZoneInfo(String dir, ECSchema schema, int cellSize) {
-    this.dir = dir;
-    this.schema = schema;
-    this.cellSize = cellSize;
-  }
-  /**
-   * Get directory of the EC zone.
-   *
-   * @return
-   */
-  public String getDir() {
-    return dir;
-  }
-  /**
-   * Get the schema for the EC Zone
-   *
-   * @return
-   */
-  public ECSchema getSchema() {
-    return schema;
-  }
-  /**
-   * Get cellSize for the EC Zone
-   */
-  public int getCellSize() {
-    return cellSize;
-  }
-  @Override
-  public String toString() {
-    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: "
-        + cellSize;
-  }
-}
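The replacement type keeps the shape of the class deleted above, under the new name. The new source file is not shown in this view; the sketch below is reconstructed from the constructor and accessor call sites later in this commit (ErasureCodingZoneManager, PBHelper, ECCommand), with the license header omitted:

package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.io.erasurecode.ECSchema;

/**
 * Information about the EC zone at the specified path: directory, schema
 * and striping cell size.
 */
public class ErasureCodingZone {
  private final String dir;
  private final ECSchema schema;
  private final int cellSize;

  public ErasureCodingZone(String dir, ECSchema schema, int cellSize) {
    this.dir = dir;
    this.schema = schema;
    this.cellSize = cellSize;
  }

  public String getDir() { return dir; }

  public ECSchema getSchema() { return schema; }

  public int getCellSize() { return cellSize; }

  @Override
  public String toString() {
    // Same format the deleted ErasureCodingZoneInfo used; ECCommand prints this.
    return "Dir: " + getDir() + ", Schema: " + schema + ", cellSize: " + cellSize;
  }
}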


@@ -35,8 +35,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -202,10 +201,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptio
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -1524,22 +1521,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
   }
-  @Override
-  public GetErasureCodingInfoResponseProto getErasureCodingInfo(RpcController controller,
-      GetErasureCodingInfoRequestProto request) throws ServiceException {
-    try {
-      ErasureCodingInfo ecInfo = server.getErasureCodingInfo(request.getSrc());
-      GetErasureCodingInfoResponseProto.Builder resBuilder = GetErasureCodingInfoResponseProto
-          .newBuilder();
-      if (ecInfo != null) {
-        resBuilder.setECInfo(PBHelper.convertECInfo(ecInfo));
-      }
-      return resBuilder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
   @Override
   public GetECSchemasResponseProto getECSchemas(RpcController controller,
       GetECSchemasRequestProto request) throws ServiceException {
@@ -1557,13 +1538,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   @Override
-  public GetErasureCodingZoneInfoResponseProto getErasureCodingZoneInfo(RpcController controller,
-      GetErasureCodingZoneInfoRequestProto request) throws ServiceException {
+  public GetErasureCodingZoneResponseProto getErasureCodingZone(RpcController controller,
+      GetErasureCodingZoneRequestProto request) throws ServiceException {
     try {
-      ErasureCodingZoneInfo ecZoneInfo = server.getErasureCodingZoneInfo(request.getSrc());
-      GetErasureCodingZoneInfoResponseProto.Builder builder = GetErasureCodingZoneInfoResponseProto.newBuilder();
-      if (ecZoneInfo != null) {
-        builder.setECZoneInfo(PBHelper.convertECZoneInfo(ecZoneInfo));
+      ErasureCodingZone ecZone = server.getErasureCodingZone(request.getSrc());
+      GetErasureCodingZoneResponseProto.Builder builder = GetErasureCodingZoneResponseProto.newBuilder();
+      if (ecZone != null) {
+        builder.setECZone(PBHelper.convertErasureCodingZone(ecZone));
       }
       return builder.build();
     } catch (IOException e) {


@@ -58,8 +58,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -168,10 +167,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneInfoResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
@@ -1552,21 +1549,6 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
-  @Override
-  public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
-    GetErasureCodingInfoRequestProto req = GetErasureCodingInfoRequestProto.newBuilder()
-        .setSrc(src).build();
-    try {
-      GetErasureCodingInfoResponseProto res = rpcProxy.getErasureCodingInfo(null, req);
-      if (res.hasECInfo()) {
-        return PBHelper.convertECInfo(res.getECInfo());
-      }
-      return null;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
   @Override
   public ECSchema[] getECSchemas() throws IOException {
     try {
@@ -1584,14 +1566,14 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
   @Override
-  public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
-    GetErasureCodingZoneInfoRequestProto req = GetErasureCodingZoneInfoRequestProto.newBuilder()
+  public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
+    GetErasureCodingZoneRequestProto req = GetErasureCodingZoneRequestProto.newBuilder()
         .setSrc(src).build();
     try {
-      GetErasureCodingZoneInfoResponseProto response = rpcProxy.getErasureCodingZoneInfo(
+      GetErasureCodingZoneResponseProto response = rpcProxy.getErasureCodingZone(
          null, req);
-      if (response.hasECZoneInfo()) {
-        return PBHelper.convertECZoneInfo(response.getECZoneInfo());
+      if (response.hasECZone()) {
+        return PBHelper.convertErasureCodingZone(response.getECZone());
      }
      return null;
    } catch (ServiceException e) {


@@ -77,13 +77,12 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -135,8 +134,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterComm
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
@@ -203,7 +201,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand;
@@ -3123,16 +3120,6 @@ public class PBHelper {
         build();
   }
-  public static ErasureCodingInfo convertECInfo(ErasureCodingInfoProto ecInfoProto) {
-    return new ErasureCodingInfo(ecInfoProto.getSrc(),
-        convertECSchema(ecInfoProto.getSchema()));
-  }
-  public static ErasureCodingInfoProto convertECInfo(ErasureCodingInfo ecInfo) {
-    return ErasureCodingInfoProto.newBuilder().setSrc(ecInfo.getSrc())
-        .setSchema(convertECSchema(ecInfo.getSchema())).build();
-  }
   public static ECSchema convertECSchema(ECSchemaProto schema) {
     List<ECSchemaOptionEntryProto> optionsList = schema.getOptionsList();
     Map<String, String> options = new HashMap<>(optionsList.size());
@@ -3157,16 +3144,17 @@ public class PBHelper {
     return builder.build();
   }
-  public static ErasureCodingZoneInfoProto convertECZoneInfo(ErasureCodingZoneInfo ecZoneInfo) {
-    return ErasureCodingZoneInfoProto.newBuilder().setDir(ecZoneInfo.getDir())
-        .setSchema(convertECSchema(ecZoneInfo.getSchema()))
-        .setCellSize(ecZoneInfo.getCellSize()).build();
+  public static ErasureCodingZoneProto convertErasureCodingZone(
+      ErasureCodingZone ecZone) {
+    return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
+        .setSchema(convertECSchema(ecZone.getSchema()))
+        .setCellSize(ecZone.getCellSize()).build();
   }
-  public static ErasureCodingZoneInfo convertECZoneInfo(ErasureCodingZoneInfoProto ecZoneInfoProto) {
-    return new ErasureCodingZoneInfo(ecZoneInfoProto.getDir(),
-        convertECSchema(ecZoneInfoProto.getSchema()),
-        ecZoneInfoProto.getCellSize());
+  public static ErasureCodingZone convertErasureCodingZone(
+      ErasureCodingZoneProto ecZoneProto) {
+    return new ErasureCodingZone(ecZoneProto.getDir(),
+        convertECSchema(ecZoneProto.getSchema()), ecZoneProto.getCellSize());
   }
   public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
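The two renamed converters are inverses over the three fields, so a round trip preserves them. A small sketch (the /eczone path and 64 KB cell size are illustrative values, not from the commit):

import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class ECZoneConversionSketch {
  public static void main(String[] args) {
    ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
    ErasureCodingZone zone = new ErasureCodingZone("/eczone", schema, 64 * 1024);
    // POJO -> protobuf -> POJO
    ErasureCodingZoneProto proto = PBHelper.convertErasureCodingZone(zone);
    ErasureCodingZone back = PBHelper.convertErasureCodingZone(proto);
    assert back.getDir().equals(zone.getDir());
    assert back.getCellSize() == zone.getCellSize();
  }
}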


@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1555,14 +1555,14 @@ public class BlockManager {
       assert rw instanceof ErasureCodingWork;
       assert rw.targets.length > 0;
       String src = block.getBlockCollection().getName();
-      ErasureCodingZoneInfo ecZoneInfo = null;
+      ErasureCodingZone ecZone = null;
       try {
-        ecZoneInfo = namesystem.getErasureCodingZoneInfoForPath(src);
+        ecZone = namesystem.getErasureCodingZoneForPath(src);
       } catch (IOException e) {
         blockLog
-            .warn("Failed to get the EC zone info for the file {} ", src);
+            .warn("Failed to get the EC zone for the file {} ", src);
       }
-      if (ecZoneInfo == null) {
+      if (ecZone == null) {
         blockLog.warn("No EC schema found for the file {}. "
             + "So cannot proceed for recovery", src);
         // TODO: we may have to revisit later for what we can do better to
@@ -1573,7 +1573,7 @@ public class BlockManager {
           new ExtendedBlock(namesystem.getBlockPoolId(), block),
           rw.srcNodes, rw.targets,
           ((ErasureCodingWork) rw).liveBlockIndicies,
-          ecZoneInfo.getSchema(), ecZoneInfo.getCellSize());
+          ecZone.getSchema(), ecZone.getCellSize());
     } else {
       rw.srcNodes[0].addBlockToBeReplicated(block, targets);
     }


@@ -23,7 +23,7 @@ import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
@@ -61,11 +61,11 @@ public class ErasureCodingZoneManager {
   }
   ECSchema getECSchema(INodesInPath iip) throws IOException {
-    ErasureCodingZoneInfo ecZoneInfo = getECZoneInfo(iip);
-    return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
+    ErasureCodingZone ecZone = getECZone(iip);
+    return ecZone == null ? null : ecZone.getSchema();
   }
-  ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
+  ErasureCodingZone getECZone(INodesInPath iip) throws IOException {
     assert dir.hasReadLock();
     Preconditions.checkNotNull(iip);
     List<INode> inodes = iip.getReadOnlyINodes();
@@ -92,7 +92,7 @@ public class ErasureCodingZoneManager {
         String schemaName = WritableUtils.readString(dIn);
         ECSchema schema = dir.getFSNamesystem().getECSchemaManager()
             .getSchema(schemaName);
-        return new ErasureCodingZoneInfo(dir.getInode(inode.getId())
+        return new ErasureCodingZone(dir.getInode(inode.getId())
            .getFullPathName(), schema, cellSize);
      }
    }


@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -386,9 +386,9 @@ class FSDirStatAndListingOp {
     final FileEncryptionInfo feInfo = isRawPath ? null :
         fsd.getFileEncryptionInfo(node, snapshot, iip);
-    final ErasureCodingZoneInfo ecZoneInfo = fsd.getECZoneInfo(iip);
-    final ECSchema schema = ecZoneInfo != null ? ecZoneInfo.getSchema() : null;
-    final int cellSize = ecZoneInfo != null ? ecZoneInfo.getCellSize() : 0;
+    final ErasureCodingZone ecZone = fsd.getECZone(iip);
+    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
+    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
     if (node.isFile()) {
       final INodeFile fileNode = node.asFile();
@@ -468,9 +468,9 @@ class FSDirStatAndListingOp {
     }
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
-    final ErasureCodingZoneInfo ecZoneInfo = fsd.getECZoneInfo(iip);
-    final ECSchema schema = ecZoneInfo != null ? ecZoneInfo.getSchema() : null;
-    final int cellSize = ecZoneInfo != null ? ecZoneInfo.getCellSize() : 0;
+    final ErasureCodingZone ecZone = fsd.getECZone(iip);
+    final ECSchema schema = ecZone != null ? ecZone.getSchema() : null;
+    final int cellSize = ecZone != null ? ecZone.getCellSize() : 0;
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,


@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1253,10 +1253,10 @@ public class FSDirectory implements Closeable {
     }
   }
-  ErasureCodingZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
+  ErasureCodingZone getECZone(INodesInPath iip) throws IOException {
     readLock();
     try {
-      return ecZoneManager.getECZoneInfo(iip);
+      return ecZoneManager.getECZone(iip);
     } finally {
       readUnlock();
     }


@@ -181,8 +181,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -7623,28 +7622,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return dir.isInECZone(iip);
   }
-  /**
-   * Get the erasure coding information for specified src
-   */
-  ErasureCodingInfo getErasureCodingInfo(String src) throws AccessControlException,
-      UnresolvedLinkException, IOException {
-    ErasureCodingZoneInfo zoneInfo = getErasureCodingZoneInfo(src);
-    if (zoneInfo != null) {
-      return new ErasureCodingInfo(src, zoneInfo.getSchema());
-    }
-    return null;
-  }
   /**
    * Get the erasure coding zone information for specified path
    */
-  ErasureCodingZoneInfo getErasureCodingZoneInfo(String src)
+  ErasureCodingZone getErasureCodingZone(String src)
       throws AccessControlException, UnresolvedLinkException, IOException {
     checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return getErasureCodingZoneInfoForPath(src);
+      return getErasureCodingZoneForPath(src);
     } finally {
       readUnlock();
     }
@@ -7865,7 +7852,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
   @Override
-  public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src)
+  public ErasureCodingZone getErasureCodingZoneForPath(String src)
       throws IOException {
     final byte[][] pathComponents = FSDirectory
         .getPathComponentsForReservedPath(src);
@@ -7875,7 +7862,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (isPermissionEnabled) {
       dir.checkPathAccess(pc, iip, FsAction.READ);
     }
-    return dir.getECZoneInfo(iip);
+    return dir.getECZone(iip);
   }
 }


@@ -84,8 +84,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
@@ -2035,12 +2034,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
     nn.spanReceiverHost.removeSpanReceiver(id);
   }
-  @Override // ClientProtocol
-  public ErasureCodingInfo getErasureCodingInfo(String src) throws IOException {
-    checkNNStartup();
-    return namesystem.getErasureCodingInfo(src);
-  }
   @Override // ClientProtocol
   public ECSchema[] getECSchemas() throws IOException {
     checkNNStartup();
@@ -2048,8 +2041,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
   @Override // ClientProtocol
-  public ErasureCodingZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
+  public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
     checkNNStartup();
-    return namesystem.getErasureCodingZoneInfo(src);
+    return namesystem.getErasureCodingZone(src);
   }
 }


@@ -21,11 +21,10 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.util.RwLock;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
@@ -52,13 +51,12 @@ public interface Namesystem extends RwLock, SafeMode {
   public boolean isInSnapshot(BlockCollection bc);
   /**
-   * Gets the ECZone info for path
-   *
+   * Gets the ECZone for path
    * @param src
    *          - path
-   * @return {@link ErasureCodingZoneInfo}
+   * @return {@link ErasureCodingZone}
   * @throws IOException
   */
-  public ErasureCodingZoneInfo getErasureCodingZoneInfoForPath(String src)
+  public ErasureCodingZone getErasureCodingZoneForPath(String src)
      throws IOException;
 }


@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingZoneInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.io.erasurecode.ECSchema;
@@ -47,8 +47,8 @@ public abstract class ECCommand extends Command {
     // Register all commands of Erasure CLI, with a '-' at the beginning in name
     // of the command.
     factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
-    factory.addClass(GetECZoneInfoCommand.class, "-"
-        + GetECZoneInfoCommand.NAME);
+    factory.addClass(GetECZoneCommand.class, "-"
+        + GetECZoneCommand.NAME);
     factory.addClass(ListECSchemas.class, "-" + ListECSchemas.NAME);
   }
@@ -153,8 +153,8 @@ public abstract class ECCommand extends Command {
   /**
    * Get the information about the zone
    */
-  static class GetECZoneInfoCommand extends ECCommand {
-    public static final String NAME = "getZoneInfo";
+  static class GetECZoneCommand extends ECCommand {
+    public static final String NAME = "getZone";
     public static final String USAGE = "<path>";
     public static final String DESCRIPTION =
         "Get information about the EC zone at specified path\n";
@@ -174,9 +174,9 @@ public abstract class ECCommand extends Command {
       super.processPath(item);
      DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
      try {
-        ErasureCodingZoneInfo ecZoneInfo = dfs.getErasureCodingZoneInfo(item.path);
-        if (ecZoneInfo != null) {
-          out.println(ecZoneInfo.toString());
+        ErasureCodingZone ecZone = dfs.getErasureCodingZone(item.path);
+        if (ecZone != null) {
+          out.println(ecZone.toString());
        } else {
          out.println("Path " + item.path + " is not in EC zone");
        }


@@ -863,10 +863,8 @@ service ClientNamenodeProtocol {
       returns(GetCurrentEditLogTxidResponseProto);
   rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
       returns(GetEditsFromTxidResponseProto);
-  rpc getErasureCodingInfo(GetErasureCodingInfoRequestProto)
-      returns(GetErasureCodingInfoResponseProto);
   rpc getECSchemas(GetECSchemasRequestProto)
       returns(GetECSchemasResponseProto);
-  rpc getErasureCodingZoneInfo(GetErasureCodingZoneInfoRequestProto)
-      returns(GetErasureCodingZoneInfoResponseProto);
+  rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
+      returns(GetErasureCodingZoneResponseProto);
 }


@@ -24,17 +24,9 @@ package hadoop.hdfs;
 import "hdfs.proto";
 /**
- * ErasureCodingInfo
+ * ErasureCodingZone
  */
-message ErasureCodingInfoProto {
-  required string src = 1;
-  required ECSchemaProto schema = 2;
-}
-/**
- * ErasureCodingZoneInfo
- */
-message ErasureCodingZoneInfoProto {
+message ErasureCodingZoneProto {
   required string dir = 1;
   required ECSchemaProto schema = 2;
   required uint32 cellSize = 3;
@@ -49,14 +41,6 @@ message CreateErasureCodingZoneRequestProto {
 message CreateErasureCodingZoneResponseProto {
 }
-message GetErasureCodingInfoRequestProto {
-  required string src = 1;
-}
-message GetErasureCodingInfoResponseProto {
-  optional ErasureCodingInfoProto ECInfo = 1;
-}
 message GetECSchemasRequestProto { // void request
 }
@@ -64,12 +48,6 @@ message GetECSchemasResponseProto {
   repeated ECSchemaProto schemas = 1;
 }
-message GetErasureCodingZoneInfoRequestProto {
+message GetErasureCodingZoneRequestProto {
   required string src = 1; // path to get the zone info
 }
-message GetErasureCodingZoneInfoResponseProto {
-  optional ErasureCodingZoneInfoProto ECZoneInfo = 1;
+message GetErasureCodingZoneResponseProto {
+  optional ErasureCodingZoneProto ECZone = 1;
 }
 /**


@@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -177,12 +177,12 @@ public class TestErasureCodingZones {
     final Path ecDir = new Path(src);
     fs.mkdir(ecDir, FsPermission.getDirDefault());
     // dir ECInfo before creating ec zone
-    assertNull(fs.getClient().getErasureCodingInfo(src));
+    assertNull(fs.getClient().getFileInfo(src).getECSchema());
     // dir ECInfo after creating ec zone
     fs.getClient().createErasureCodingZone(src, null, 0); //Default one will be used.
     ECSchema sysDefaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     verifyErasureCodingInfo(src, sysDefaultSchema);
-    fs.create(new Path(ecDir, "/child1")).close();
+    fs.create(new Path(ecDir, "child1")).close();
     // verify for the files in ec zone
     verifyErasureCodingInfo(src + "/child1", sysDefaultSchema);
   }
@@ -198,21 +198,19 @@ public class TestErasureCodingZones {
     final Path ecDir = new Path(src);
     fs.mkdir(ecDir, FsPermission.getDirDefault());
     // dir ECInfo before creating ec zone
-    assertNull(fs.getClient().getErasureCodingInfo(src));
+    assertNull(fs.getClient().getFileInfo(src).getECSchema());
     // dir ECInfo after creating ec zone
     fs.getClient().createErasureCodingZone(src, usingSchema, 0);
     verifyErasureCodingInfo(src, usingSchema);
-    fs.create(new Path(ecDir, "/child1")).close();
+    fs.create(new Path(ecDir, "child1")).close();
     // verify for the files in ec zone
     verifyErasureCodingInfo(src + "/child1", usingSchema);
   }
   private void verifyErasureCodingInfo(
       String src, ECSchema usingSchema) throws IOException {
-    ErasureCodingInfo ecInfo = fs.getClient().getErasureCodingInfo(src);
-    assertNotNull("ECInfo should have been non-null", ecInfo);
-    assertEquals(src, ecInfo.getSrc());
-    ECSchema schema = ecInfo.getSchema();
+    HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
+    ECSchema schema = hdfsFileStatus.getECSchema();
     assertNotNull(schema);
     assertEquals("Actually used schema should be equal with target schema",
         usingSchema, schema);


@@ -67,9 +67,9 @@
     </test>
     <test>
-      <description>help: getZoneInfo command</description>
+      <description>help: getZone command</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -help getZoneInfo</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -help getZone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -80,7 +80,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-getZoneInfo &lt;path&gt;(.)*</expected-output>
+          <expected-output>^-getZone &lt;path&gt;(.)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -145,7 +145,7 @@
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /eczone</command>
@@ -159,10 +159,10 @@
     </test>
     <test>
-      <description>getZoneInfo : get information about the EC zone at specified path not in zone</description>
+      <description>getZone : get information about the EC zone at specified path not in zone</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /noec</command>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo /noec</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /noec</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /noec</command>
@@ -176,11 +176,11 @@
     </test>
     <test>
-      <description>getZoneInfo : get information about the EC zone at specified path</description>
+      <description>getZone : get information about the EC zone at specified path</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /eczone</command>
@@ -194,12 +194,12 @@
     </test>
     <test>
-      <description>getZoneInfo : get EC zone at specified file path</description>
+      <description>getZone : get EC zone at specified file path</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /eczone</command>
         <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3 /eczone</ec-admin-command>
         <command>-fs NAMENODE -touchz /eczone/ecfile</command>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo /eczone/ecfile</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm /eczone/ecfile</command>
@@ -313,24 +313,24 @@
     </test>
     <test>
-      <description>getZoneInfo : illegal parameters - path is missing</description>
+      <description>getZone : illegal parameters - path is missing</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo </ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone </ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-getZoneInfo: &lt;path&gt; is missing(.)*</expected-output>
+          <expected-output>^-getZone: &lt;path&gt; is missing(.)*</expected-output>
         </comparator>
       </comparators>
     </test>
     <test>
-      <description>getZoneInfo : illegal parameters - too many arguments</description>
+      <description>getZone : illegal parameters - too many arguments</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo /eczone /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone /eczone</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm /eczone</command>
@@ -338,22 +338,22 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-getZoneInfo: Too many arguments</expected-output>
+          <expected-output>-getZone: Too many arguments</expected-output>
         </comparator>
       </comparators>
     </test>
     <test>
-      <description>getZoneInfo : illegal parameters - no such file</description>
+      <description>getZone : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getZoneInfo /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
       </test-commands>
      <cleanup-commands>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>RegexpComparator</type>
-          <expected-output>^getZoneInfo: `/eczone': No such file or directory(.)*</expected-output>
+          <expected-output>^getZone: `/eczone': No such file or directory(.)*</expected-output>
        </comparator>
      </comparators>
    </test>