HDFS-8833. Erasure coding: store EC schema and cell size in INodeFile and eliminate notion of EC zones.

This commit is contained in:
Zhe Zhang 2015-09-09 23:07:20 -07:00
parent 60bd765ac1
commit f62237bc2f
52 changed files with 474 additions and 727 deletions

View File

@ -1485,11 +1485,13 @@ List<XAttr> listXAttrs(String src)
EventBatchList getEditsFromTxid(long txid) throws IOException;
/**
* Create an erasure coding zone with specified policy, if any, otherwise
* default
* Set an erasure coding policy on a specified path.
* @param src The path to set policy on.
* @param ecPolicy The erasure coding policy. If null, default policy will
* be used
*/
@AtMostOnce
void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
throws IOException;
/**
@ -1501,11 +1503,11 @@ void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException;
/**
* Get the information about the EC zone for the path
*
* Get the information about the EC policy for the path
*
* @param src path to get the info for
* @throws IOException
*/
@Idempotent
ErasureCodingZone getErasureCodingZone(String src) throws IOException;
ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException;
}

View File

@ -1,54 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.protocol;
/**
* Information about the EC Zone at the specified path.
*/
public class ErasureCodingZone {
private String dir;
private ErasureCodingPolicy ecPolicy;
public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) {
this.dir = dir;
this.ecPolicy = ecPolicy;
}
/**
* Get directory of the EC zone.
*
* @return
*/
public String getDir() {
return dir;
}
/**
* Get the erasure coding policy for the EC Zone
*
* @return
*/
public ErasureCodingPolicy getErasureCodingPolicy() {
return ecPolicy;
}
@Override
public String toString() {
return "Dir: " + getDir() + ", Policy: " + ecPolicy;
}
}

View File

@ -867,14 +867,14 @@ service ClientNamenodeProtocol {
returns(ListEncryptionZonesResponseProto);
rpc getEZForPath(GetEZForPathRequestProto)
returns(GetEZForPathResponseProto);
rpc createErasureCodingZone(CreateErasureCodingZoneRequestProto)
returns(CreateErasureCodingZoneResponseProto);
rpc setErasureCodingPolicy(SetErasureCodingPolicyRequestProto)
returns(SetErasureCodingPolicyResponseProto);
rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto)
returns(GetCurrentEditLogTxidResponseProto);
rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
returns(GetEditsFromTxidResponseProto);
rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto)
returns(GetErasureCodingPoliciesResponseProto);
rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
returns(GetErasureCodingZoneResponseProto);
rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto)
returns(GetErasureCodingPolicyResponseProto);
}

View File

@ -23,20 +23,12 @@ package hadoop.hdfs;
import "hdfs.proto";
/**
* ErasureCodingZone
*/
message ErasureCodingZoneProto {
required string dir = 1;
required ErasureCodingPolicyProto ecPolicy = 2;
}
message CreateErasureCodingZoneRequestProto {
message SetErasureCodingPolicyRequestProto {
required string src = 1;
optional ErasureCodingPolicyProto ecPolicy = 2;
}
message CreateErasureCodingZoneResponseProto {
message SetErasureCodingPolicyResponseProto {
}
message GetErasureCodingPoliciesRequestProto { // void request
@ -46,12 +38,12 @@ message GetErasureCodingPoliciesResponseProto {
repeated ErasureCodingPolicyProto ecPolicies = 1;
}
message GetErasureCodingZoneRequestProto {
required string src = 1; // path to get the zone info
message GetErasureCodingPolicyRequestProto {
required string src = 1; // path to get the policy info
}
message GetErasureCodingZoneResponseProto {
optional ErasureCodingZoneProto ECZone = 1;
message GetErasureCodingPolicyResponseProto {
optional ErasureCodingPolicyProto ecPolicy = 1;
}
/**

View File

@ -412,3 +412,6 @@
HDFS-8978. Erasure coding: fix 2 failed tests of DFSStripedOutputStream.
(Walter Su via jing9)
HDFS-8833. Erasure coding: store EC schema and cell size in INodeFile and
eliminate notion of EC zones. (zhz)

View File

@ -114,7 +114,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@ -2901,12 +2900,13 @@ public RemoteIterator<EncryptionZone> listEncryptionZones()
return new EncryptionZoneIterator(namenode, traceSampler);
}
public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
TraceScope scope = getPathTraceScope("setErasureCodingPolicy", src);
try {
namenode.createErasureCodingZone(src, ecPolicy);
namenode.setErasureCodingPolicy(src, ecPolicy);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
SafeModeException.class,
@ -3223,17 +3223,19 @@ TraceScope getSrcDstTraceScope(String description, String src, String dst) {
}
/**
* Get the erasure coding zone information for the specified path
*
* Get the erasure coding policy information for the specified path
*
* @param src path to get the information for
* @return Returns the zone information if path is in EC Zone, null otherwise
* @return Returns the policy information if file or directory on the path is
* erasure coded, null otherwise
* @throws IOException
*/
public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getErasureCodingZone", src);
TraceScope scope = getPathTraceScope("getErasureCodingPolicy", src);
try {
return namenode.getErasureCodingZone(src);
return namenode.getErasureCodingPolicy(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class, UnresolvedPathException.class);

View File

@ -73,7 +73,6 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@ -2263,20 +2262,20 @@ public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
}
/**
* Create the erasurecoding zone
*
* @param path Directory to create the ec zone
* @param ecPolicy erasure coding policy for the zone. If not specified default will be used.
* Set the source path to the specified erasure coding policy.
*
* @param path The directory to set the policy
* @param ecPolicy The erasure coding policy. If not specified default will be used.
* @throws IOException
*/
public void createErasureCodingZone(final Path path, final ErasureCodingPolicy ecPolicy)
public void setErasureCodingPolicy(final Path path, final ErasureCodingPolicy ecPolicy)
throws IOException {
Path absF = fixRelativePart(path);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException,
UnresolvedLinkException {
dfs.createErasureCodingZone(getPathName(p), ecPolicy);
dfs.setErasureCodingPolicy(getPathName(p), ecPolicy);
return null;
}
@ -2284,42 +2283,43 @@ public Void doCall(final Path p) throws IOException,
public Void next(final FileSystem fs, final Path p) throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem) fs;
myDfs.createErasureCodingZone(p, ecPolicy);
myDfs.setErasureCodingPolicy(p, ecPolicy);
return null;
}
throw new UnsupportedOperationException(
"Cannot createErasureCodingZone through a symlink to a "
"Cannot setErasureCodingPolicy through a symlink to a "
+ "non-DistributedFileSystem: " + path + " -> " + p);
}
}.resolve(this, absF);
}
/**
* Get ErasureCoding zone information for the specified path
*
* @param path
* @return Returns the zone information if path is in EC zone, null otherwise
* Get erasure coding policy information for the specified path
*
* @param path The path of the file or directory
* @return Returns the policy information if file or directory on the path
* is erasure coded, null otherwise
* @throws IOException
*/
public ErasureCodingZone getErasureCodingZone(final Path path)
public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
throws IOException {
Path absF = fixRelativePart(path);
return new FileSystemLinkResolver<ErasureCodingZone>() {
return new FileSystemLinkResolver<ErasureCodingPolicy>() {
@Override
public ErasureCodingZone doCall(final Path p) throws IOException,
public ErasureCodingPolicy doCall(final Path p) throws IOException,
UnresolvedLinkException {
return dfs.getErasureCodingZone(getPathName(p));
return dfs.getErasureCodingPolicy(getPathName(p));
}
@Override
public ErasureCodingZone next(final FileSystem fs, final Path p)
public ErasureCodingPolicy next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem) fs;
return myDfs.getErasureCodingZone(p);
return myDfs.getErasureCodingPolicy(p);
}
throw new UnsupportedOperationException(
"Cannot getErasureCodingZone through a symlink to a "
"Cannot getErasureCodingPolicy through a symlink to a "
+ "non-DistributedFileSystem: " + path + " -> " + p);
}
}.resolve(this, absF);

View File

@ -37,7 +37,6 @@
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
@ -367,31 +366,33 @@ public void setStoragePolicy(final Path src, final String policyName)
}
/**
* Create the ErasureCoding zone
* Set the source path to the specified erasure coding policy.
*
* @param path Directory to create the ErasureCoding zone
* @param ecPolicy erasure coding policy for the zone. If null, the default will be used.
* @param path The source path referring to a directory.
* @param ecPolicy The erasure coding policy for the directory.
* If null, the default will be used.
* @throws IOException
*/
public void createErasureCodingZone(final Path path,
public void setErasureCodingPolicy(final Path path,
final ErasureCodingPolicy ecPolicy) throws IOException {
dfs.createErasureCodingZone(path, ecPolicy);
dfs.setErasureCodingPolicy(path, ecPolicy);
}
/**
* Get the ErasureCoding zone information for the specified path
* Get the erasure coding policy information for the specified path
*
* @param path
* @return Returns the zone information if path is in EC zone, null otherwise
* @return Returns the policy information if file or directory on the path is
* erasure coded. Null otherwise.
* @throws IOException
*/
public ErasureCodingZone getErasureCodingZone(final Path path)
public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
throws IOException {
return dfs.getErasureCodingZone(path);
return dfs.getErasureCodingPolicy(path);
}
/**
* Get the ErasureCoding policies supported.
* Get the Erasure coding policies supported.
*
* @throws IOException
*/

View File

@ -1,54 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.protocol;
/**
* Information about the EC Zone at the specified path.
*/
public class ErasureCodingZone {
private String dir;
private ErasureCodingPolicy ecPolicy;
public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) {
this.dir = dir;
this.ecPolicy = ecPolicy;
}
/**
* Get directory of the EC zone.
*
* @return
*/
public String getDir() {
return dir;
}
/**
* Get the erasure coding policy for the EC Zone
*
* @return
*/
public ErasureCodingPolicy getErasureCodingPolicy() {
return ecPolicy;
}
@Override
public String toString() {
return "Dir: " + getDir() + ", Policy: " + ecPolicy;
}
}

View File

@ -35,7 +35,6 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@ -203,10 +202,10 @@
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
@ -1402,14 +1401,14 @@ public ListEncryptionZonesResponseProto listEncryptionZones(
}
@Override
public CreateErasureCodingZoneResponseProto createErasureCodingZone(
RpcController controller, CreateErasureCodingZoneRequestProto req)
public SetErasureCodingPolicyResponseProto setErasureCodingPolicy(
RpcController controller, SetErasureCodingPolicyRequestProto req)
throws ServiceException {
try {
ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req
.getEcPolicy()) : null;
server.createErasureCodingZone(req.getSrc(), ecPolicy);
return CreateErasureCodingZoneResponseProto.newBuilder().build();
server.setErasureCodingPolicy(req.getSrc(), ecPolicy);
return SetErasureCodingPolicyResponseProto.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
}
@ -1554,13 +1553,13 @@ public GetErasureCodingPoliciesResponseProto getErasureCodingPolicies(RpcControl
}
@Override
public GetErasureCodingZoneResponseProto getErasureCodingZone(RpcController controller,
GetErasureCodingZoneRequestProto request) throws ServiceException {
public GetErasureCodingPolicyResponseProto getErasureCodingPolicy(RpcController controller,
GetErasureCodingPolicyRequestProto request) throws ServiceException {
try {
ErasureCodingZone ecZone = server.getErasureCodingZone(request.getSrc());
GetErasureCodingZoneResponseProto.Builder builder = GetErasureCodingZoneResponseProto.newBuilder();
if (ecZone != null) {
builder.setECZone(PBHelper.convertErasureCodingZone(ecZone));
ErasureCodingPolicy ecPolicy = server.getErasureCodingPolicy(request.getSrc());
GetErasureCodingPolicyResponseProto.Builder builder = GetErasureCodingPolicyResponseProto.newBuilder();
if (ecPolicy != null) {
builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
}
return builder.build();
} catch (IOException e) {

View File

@ -58,7 +58,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@ -168,9 +167,9 @@
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@ -1420,17 +1419,17 @@ public BatchedEntries<EncryptionZone> listEncryptionZones(long id)
}
@Override
public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
throws IOException {
final CreateErasureCodingZoneRequestProto.Builder builder =
CreateErasureCodingZoneRequestProto.newBuilder();
final SetErasureCodingPolicyRequestProto.Builder builder =
SetErasureCodingPolicyRequestProto.newBuilder();
builder.setSrc(src);
if (ecPolicy != null) {
builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
}
CreateErasureCodingZoneRequestProto req = builder.build();
SetErasureCodingPolicyRequestProto req = builder.build();
try {
rpcProxy.createErasureCodingZone(null, req);
rpcProxy.setErasureCodingPolicy(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -1577,14 +1576,14 @@ public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
}
@Override
public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
GetErasureCodingZoneRequestProto req = GetErasureCodingZoneRequestProto.newBuilder()
public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
GetErasureCodingPolicyRequestProto req = GetErasureCodingPolicyRequestProto.newBuilder()
.setSrc(src).build();
try {
GetErasureCodingZoneResponseProto response = rpcProxy.getErasureCodingZone(
GetErasureCodingPolicyResponseProto response = rpcProxy.getErasureCodingPolicy(
null, req);
if (response.hasECZone()) {
return PBHelper.convertErasureCodingZone(response.getECZone());
if (response.hasEcPolicy()) {
return PBHelper.convertErasureCodingPolicy(response.getEcPolicy());
}
return null;
} catch (ServiceException e) {

View File

@ -75,7 +75,6 @@
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -132,7 +131,6 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
@ -2975,19 +2973,6 @@ public static ErasureCodingPolicyProto convertErasureCodingPolicy(
.setCellSize(policy.getCellSize());
return builder.build();
}
public static ErasureCodingZoneProto convertErasureCodingZone(
ErasureCodingZone ecZone) {
return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir())
.setEcPolicy(convertErasureCodingPolicy(ecZone.getErasureCodingPolicy()))
.build();
}
public static ErasureCodingZone convertErasureCodingZone(
ErasureCodingZoneProto ecZoneProto) {
return new ErasureCodingZone(ecZoneProto.getDir(),
convertErasureCodingPolicy(ecZoneProto.getEcPolicy()));
}
public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
BlockECRecoveryInfoProto blockEcRecoveryInfoProto) {

View File

@ -55,7 +55,6 @@
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@ -924,11 +923,9 @@ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
final boolean isFileUnderConstruction, final long offset,
final long length, final boolean needBlockToken,
final boolean inSnapshot, FileEncryptionInfo feInfo,
ErasureCodingZone ecZone)
ErasureCodingPolicy ecPolicy)
throws IOException {
assert namesystem.hasReadLock();
final ErasureCodingPolicy ecPolicy = ecZone != null ? ecZone
.getErasureCodingPolicy() : null;
if (blocks == null) {
return null;
} else if (blocks.length == 0) {
@ -1607,14 +1604,14 @@ private boolean validateRecoveryWork(BlockRecoveryWork rw) {
assert rw instanceof ErasureCodingWork;
assert rw.getTargets().length > 0;
String src = getBlockCollection(block).getName();
ErasureCodingZone ecZone = null;
ErasureCodingPolicy ecPolicy = null;
try {
ecZone = namesystem.getErasureCodingZoneForPath(src);
ecPolicy = namesystem.getErasureCodingPolicyForPath(src);
} catch (IOException e) {
blockLog
.warn("Failed to get the EC zone for the file {} ", src);
.warn("Failed to get EC policy for the file {} ", src);
}
if (ecZone == null) {
if (ecPolicy == null) {
blockLog.warn("No erasure coding policy found for the file {}. "
+ "So cannot proceed for recovery", src);
// TODO: we may have to revisit later for what we can do better to
@ -1624,8 +1621,7 @@ private boolean validateRecoveryWork(BlockRecoveryWork rw) {
rw.getTargets()[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
new ExtendedBlock(namesystem.getBlockPoolId(), block),
rw.getSrcNodes(), rw.getTargets(),
((ErasureCodingWork) rw).getLiveBlockIndicies(),
ecZone.getErasureCodingPolicy());
((ErasureCodingWork) rw).getLiveBlockIndicies(), ecPolicy);
} else {
rw.getSrcNodes()[0].addBlockToBeReplicated(block, targets);
}

View File

@ -387,8 +387,8 @@ enum BlockUCState {
"raw.hdfs.crypto.file.encryption.info";
String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
"security.hdfs.unreadable.by.superuser";
String XATTR_ERASURECODING_ZONE =
"raw.hdfs.erasurecoding.zone";
String XATTR_ERASURECODING_POLICY =
"raw.hdfs.erasurecoding.policy";
long BLOCK_GROUP_INDEX_MASK = 15;
byte MAX_BLOCKS_IN_GROUP = 16;

View File

@ -83,7 +83,7 @@ public static ErasureCodingPolicy[] getSystemPolices() {
/**
* Get system-wide default policy, which can be used by default
* when no policy is specified for an EC zone.
* when no policy is specified for a path.
* @return ecPolicy
*/
public static ErasureCodingPolicy getSystemDefaultPolicy() {

View File

@ -1,163 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableUtils;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_ZONE;
/**
* Manages the list of erasure coding zones in the filesystem.
* <p/>
* The ErasureCodingZoneManager has its own lock, but relies on the FSDirectory
* lock being held for many operations. The FSDirectory lock should not be
* taken if the manager lock is already held.
* TODO: consolidate zone logic w/ encrypt. zones {@link EncryptionZoneManager}
*/
public class ErasureCodingZoneManager {
private final FSDirectory dir;
/**
* Construct a new ErasureCodingZoneManager.
*
* @param dir Enclosing FSDirectory
*/
public ErasureCodingZoneManager(FSDirectory dir) {
this.dir = dir;
}
ErasureCodingPolicy getErasureCodingPolicy(INodesInPath iip) throws IOException {
ErasureCodingZone ecZone = getErasureCodingZone(iip);
return ecZone == null ? null : ecZone.getErasureCodingPolicy();
}
ErasureCodingZone getErasureCodingZone(INodesInPath iip) throws IOException {
assert dir.hasReadLock();
Preconditions.checkNotNull(iip, "INodes cannot be null");
List<INode> inodes = iip.getReadOnlyINodes();
for (int i = inodes.size() - 1; i >= 0; i--) {
final INode inode = inodes.get(i);
if (inode == null) {
continue;
}
// We don't allow symlinks in an EC zone, or pointing to a file/dir in
// an EC. Therefore if a symlink is encountered, the dir shouldn't have
// EC
// TODO: properly support symlinks in EC zones
if (inode.isSymlink()) {
return null;
}
final List<XAttr> xAttrs = inode.getXAttrFeature() == null ?
new ArrayList<XAttr>(0)
: inode.getXAttrFeature().getXAttrs();
for (XAttr xAttr : xAttrs) {
if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixedName(xAttr))) {
ByteArrayInputStream bIn=new ByteArrayInputStream(xAttr.getValue());
DataInputStream dIn=new DataInputStream(bIn);
String ecPolicyName = WritableUtils.readString(dIn);
ErasureCodingPolicy ecPolicy = dir.getFSNamesystem()
.getErasureCodingPolicyManager().getPolicy(ecPolicyName);
return new ErasureCodingZone(dir.getInode(inode.getId())
.getFullPathName(), ecPolicy);
}
}
}
return null;
}
List<XAttr> createErasureCodingZone(final INodesInPath srcIIP,
ErasureCodingPolicy ecPolicy) throws IOException {
assert dir.hasWriteLock();
Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
String src = srcIIP.getPath();
if (dir.isNonEmptyDirectory(srcIIP)) {
throw new IOException(
"Attempt to create an erasure coding zone for a " +
"non-empty directory " + src);
}
if (srcIIP.getLastINode() != null &&
!srcIIP.getLastINode().isDirectory()) {
throw new IOException("Attempt to create an erasure coding zone " +
"for a file " + src);
}
if (getErasureCodingPolicy(srcIIP) != null) {
throw new IOException("Directory " + src + " is already in an " +
"erasure coding zone.");
}
// System default erasure coding policy will be used since no specified.
if (ecPolicy == null) {
ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
}
final XAttr ecXAttr;
DataOutputStream dOut = null;
try {
ByteArrayOutputStream bOut = new ByteArrayOutputStream();
dOut = new DataOutputStream(bOut);
WritableUtils.writeString(dOut, ecPolicy.getName());
ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE,
bOut.toByteArray());
} finally {
IOUtils.closeStream(dOut);
}
final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
xattrs.add(ecXAttr);
FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs,
EnumSet.of(XAttrSetFlag.CREATE));
return xattrs;
}
void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
throws IOException {
assert dir.hasReadLock();
final ErasureCodingZone srcZone = getErasureCodingZone(srcIIP);
final ErasureCodingZone dstZone = getErasureCodingZone(dstIIP);
if (srcZone != null && srcZone.getDir().equals(src) && dstZone == null) {
return;
}
final ErasureCodingPolicy srcECPolicy =
srcZone != null ? srcZone.getErasureCodingPolicy() : null;
final ErasureCodingPolicy dstECPolicy =
dstZone != null ? dstZone.getErasureCodingPolicy() : null;
if (srcECPolicy != null && !srcECPolicy.equals(dstECPolicy) ||
dstECPolicy != null && !dstECPolicy.equals(srcECPolicy)) {
throw new IOException(
src + " can't be moved because the source and destination have " +
"different erasure coding policies.");
}
}
}

View File

@ -17,14 +17,27 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableUtils;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
/**
* Helper class to perform erasure coding related operations.
@ -38,18 +51,17 @@ final class FSDirErasureCodingOp {
private FSDirErasureCodingOp() {}
/**
* Create an erasure coding zone on directory src.
* Set an erasure coding policy on the given path.
*
* @param fsn namespace
* @param srcArg the path of a directory which will be the root of the
* erasure coding zone. The directory must be empty.
* @param ecPolicy erasure coding policy for the erasure coding zone
* @param fsn The namespace
* @param srcArg The path of the target directory.
* @param ecPolicy The erasure coding policy to set on the target directory.
* @param logRetryCache whether to record RPC ids in editlog for retry
* cache rebuilding
* @return {@link HdfsFileStatus}
* @throws IOException
*/
static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn,
static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
final String srcArg, final ErasureCodingPolicy ecPolicy,
final boolean logRetryCache) throws IOException {
assert fsn.hasWriteLock();
@ -66,8 +78,7 @@ static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn,
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(src, false);
xAttrs = fsn.getErasureCodingZoneManager().createErasureCodingZone(
iip, ecPolicy);
xAttrs = createErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
} finally {
fsd.writeUnlock();
}
@ -75,62 +86,83 @@ static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn,
return fsd.getAuditFileInfo(iip);
}
/**
 * Build and persist the extended attribute that records an erasure coding
 * policy on the directory identified by {@code srcIIP}.
 *
 * @param fsn the namespace
 * @param srcIIP resolved inodes of the target path; the last inode, if it
 *               exists, must be a directory
 * @param ecPolicy the policy to record; when {@code null} the system
 *                 default policy is used instead
 * @return the xattr list that was set (a single entry), suitable for
 *         edit-log / retry-cache bookkeeping by the caller
 * @throws IOException if the target is a file, or if the path already has
 *         an erasure coding policy
 */
static List<XAttr> createErasureCodingPolicyXAttr(final FSNamesystem fsn,
    final INodesInPath srcIIP, ErasureCodingPolicy ecPolicy) throws IOException {
  FSDirectory fsd = fsn.getFSDirectory();
  assert fsd.hasWriteLock();
  Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
  String src = srcIIP.getPath();
  // Policies may only be attached to directories, never to files.
  if (srcIIP.getLastINode() != null &&
      !srcIIP.getLastINode().isDirectory()) {
    throw new IOException("Attempt to set an erasure coding policy " +
        "for a file " + src);
  }
  // Refuse to overwrite an existing (possibly inherited) policy.
  if (getErasureCodingPolicyForPath(fsn, srcIIP) != null) {
    throw new IOException("Directory " + src + " already has an " +
        "erasure coding policy.");
  }

  // System default erasure coding policy will be used since no specified.
  if (ecPolicy == null) {
    ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
  }

  final XAttr ecXAttr;
  DataOutputStream dOut = null;
  try {
    ByteArrayOutputStream bOut = new ByteArrayOutputStream();
    dOut = new DataOutputStream(bOut);
    // The xattr value is the policy name serialized in Writable format.
    WritableUtils.writeString(dOut, ecPolicy.getName());
    ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_POLICY,
        bOut.toByteArray());
  } finally {
    IOUtils.closeStream(dOut);
  }
  final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
  xattrs.add(ecXAttr);
  // CREATE flag: the xattr must not already exist on this inode.
  FSDirXAttrOp.unprotectedSetXAttrs(fsd, src, xattrs,
      EnumSet.of(XAttrSetFlag.CREATE));
  return xattrs;
}
/**
* Get the erasure coding zone information for specified path.
* Get the erasure coding policy information for specified path.
*
* @param fsn namespace
* @param src path
* @return {@link ErasureCodingZone}
* @return {@link ErasureCodingPolicy}
* @throws IOException
*/
static ErasureCodingZone getErasureCodingZone(final FSNamesystem fsn,
static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
final String src) throws IOException {
assert fsn.hasReadLock();
final INodesInPath iip = getINodesInPath(fsn, src);
return getErasureCodingZoneForPath(fsn, iip);
return getErasureCodingPolicyForPath(fsn, iip);
}
/**
 * Get erasure coding zone information for specified path.
 *
 * @param fsn namespace
 * @param iip inodes in the path containing the file; must already be
 *            resolved by the caller
 * @return {@link ErasureCodingZone}, or {@code null} when no zone covers
 *         the path
 * @throws IOException
 */
static ErasureCodingZone getErasureCodingZone(final FSNamesystem fsn,
    final INodesInPath iip) throws IOException {
  assert fsn.hasReadLock();
  return getErasureCodingZoneForPath(fsn, iip);
}
/**
* Check if the file is in erasure coding zone.
* Check if the file or directory has an erasure coding policy.
*
* @param fsn namespace
* @param srcArg path
* @return true represents the file is in erasure coding zone, false otw
* @return Whether the file or directory has an erasure coding policy.
* @throws IOException
*/
static boolean isInErasureCodingZone(final FSNamesystem fsn,
static boolean hasErasureCodingPolicy(final FSNamesystem fsn,
final String srcArg) throws IOException {
assert fsn.hasReadLock();
final INodesInPath iip = getINodesInPath(fsn, srcArg);
return getErasureCodingPolicyForPath(fsn, iip) != null;
return hasErasureCodingPolicy(fsn, getINodesInPath(fsn, srcArg));
}
/**
* Check if the file is in erasure coding zone.
* Check if the file or directory has an erasure coding policy.
*
* @param fsn namespace
* @param iip inodes in the path containing the file
* @return true represents the file is in erasure coding zone, false otw
* @return Whether the file or directory has an erasure coding policy.
* @throws IOException
*/
static boolean isInErasureCodingZone(final FSNamesystem fsn,
static boolean hasErasureCodingPolicy(final FSNamesystem fsn,
final INodesInPath iip) throws IOException {
return getErasureCodingPolicy(fsn, iip) != null;
}
@ -178,25 +210,46 @@ private static INodesInPath getINodesInPath(final FSNamesystem fsn,
return iip;
}
private static ErasureCodingZone getErasureCodingZoneForPath(
final FSNamesystem fsn, final INodesInPath iip) throws IOException {
final FSDirectory fsd = fsn.getFSDirectory();
private static ErasureCodingPolicy getErasureCodingPolicyForPath(FSNamesystem fsn,
INodesInPath iip) throws IOException {
Preconditions.checkNotNull(iip, "INodes cannot be null");
FSDirectory fsd = fsn.getFSDirectory();
fsd.readLock();
try {
return fsn.getErasureCodingZoneManager().getErasureCodingZone(iip);
} finally {
fsd.readUnlock();
}
}
private static ErasureCodingPolicy getErasureCodingPolicyForPath(final FSNamesystem fsn,
final INodesInPath iip) throws IOException {
final FSDirectory fsd = fsn.getFSDirectory();
fsd.readLock();
try {
return fsn.getErasureCodingZoneManager().getErasureCodingPolicy(iip);
List<INode> inodes = iip.getReadOnlyINodes();
for (int i = inodes.size() - 1; i >= 0; i--) {
final INode inode = inodes.get(i);
if (inode == null) {
continue;
}
/**
* TODO: lookup {@link ErasureCodingPolicyManager#getSystemPolices()}
*/
if (inode.isFile()) {
return inode.asFile().getErasureCodingPolicyID() == 0 ?
null : ErasureCodingPolicyManager.getSystemDefaultPolicy();
}
// We don't allow setting EC policies on paths with a symlink. Thus
// if a symlink is encountered, the dir shouldn't have EC policy.
// TODO: properly support symlinks
if (inode.isSymlink()) {
return null;
}
final XAttrFeature xaf = inode.getXAttrFeature();
if (xaf != null) {
XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY);
if (xattr != null) {
ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue());
DataInputStream dIn = new DataInputStream(bIn);
String ecPolicyName = WritableUtils.readString(dIn);
return fsd.getFSNamesystem().getErasureCodingPolicyManager().
getPolicy(ecPolicyName);
}
}
}
} finally {
fsd.readUnlock();
}
return null;
}
}

View File

@ -185,7 +185,6 @@ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
}
fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
fsd.ecZoneManager.checkMoveValidity(srcIIP, dstIIP, src);
// Ensure dst has quota to accommodate rename
verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
verifyQuotaForRename(fsd, srcIIP, dstIIP);
@ -358,7 +357,6 @@ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
fsd.ecZoneManager.checkMoveValidity(srcIIP, dstIIP, src);
final INode dstInode = dstIIP.getLastINode();
List<INodeDirectory> snapshottableDirs = new ArrayList<>();
if (dstInode != null) { // Destination exists

View File

@ -31,7 +31,6 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@ -182,12 +181,12 @@ static GetBlockLocationsResult getBlockLocations(
final FileEncryptionInfo feInfo = isReservedName ? null
: fsd.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);
final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
fsd.getFSNamesystem(), iip);
final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
getErasureCodingPolicy(fsd.getFSNamesystem(), iip);
final LocatedBlocks blocks = bm.createLocatedBlocks(
inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
length, needBlockToken, iip.isSnapshot(), feInfo, ecZone);
length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
// Set caching information for the located blocks.
for (LocatedBlock lb : blocks.getLocatedBlocks()) {
@ -447,10 +446,8 @@ static HdfsFileStatus createFileStatus(
final FileEncryptionInfo feInfo = isRawPath ? null :
fsd.getFileEncryptionInfo(node, snapshot, iip);
final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsd.getFSNamesystem(), iip);
final ErasureCodingPolicy ecPolicy =
ecZone != null ? ecZone.getErasureCodingPolicy() : null;
if (node.isFile()) {
final INodeFile fileNode = node.asFile();
@ -505,7 +502,7 @@ private static HdfsLocatedFileStatus createLocatedFileStatus(
final boolean isEncrypted;
final FileEncryptionInfo feInfo = isRawPath ? null :
fsd.getFileEncryptionInfo(node, snapshot, iip);
final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsd.getFSNamesystem(), iip);
if (node.isFile()) {
final INodeFile fileNode = node.asFile();
@ -520,7 +517,7 @@ private static HdfsLocatedFileStatus createLocatedFileStatus(
loc = fsd.getBlockManager().createLocatedBlocks(
fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
inSnapshot, feInfo, ecZone);
inSnapshot, feInfo, ecPolicy);
if (loc == null) {
loc = new LocatedBlocks();
}
@ -531,8 +528,6 @@ private static HdfsLocatedFileStatus createLocatedFileStatus(
}
int childrenNum = node.isDirectory() ?
node.asDirectory().getChildrenNum(snapshot) : 0;
final ErasureCodingPolicy ecPolicy =
ecZone != null ? ecZone.getErasureCodingPolicy() : null;
HdfsLocatedFileStatus status =
new HdfsLocatedFileStatus(size, node.isDirectory(), replication,

View File

@ -37,7 +37,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@ -486,8 +485,8 @@ static INodeFile addFileForEditLog(
Preconditions.checkNotNull(existing);
assert fsd.hasWriteLock();
try {
// check if the file is in an EC zone
final boolean isStriped = FSDirErasureCodingOp.isInErasureCodingZone(
// check if the file has an EC policy
final boolean isStriped = FSDirErasureCodingOp.hasErasureCodingPolicy(
fsd.getFSNamesystem(), existing);
if (underConstruction) {
newNode = newINodeFile(id, permissions, modificationTime,
@ -533,9 +532,8 @@ private static BlockInfo addBlock(FSDirectory fsd, String path,
// associate new last block for the file
final BlockInfo blockInfo;
if (isStriped) {
ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsd.getFSNamesystem(), inodesInPath);
ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
short numDataUnits = (short) ecPolicy.getNumDataUnits();
short numParityUnits = (short) ecPolicy.getNumParityUnits();
short numLocations = (short) (numDataUnits + numParityUnits);
@ -586,7 +584,7 @@ private static INodesInPath addFile(
INodesInPath newiip;
fsd.writeLock();
try {
final boolean isStriped = FSDirErasureCodingOp.isInErasureCodingZone(
final boolean isStriped = FSDirErasureCodingOp.hasErasureCodingPolicy(
fsd.getFSNamesystem(), existing);
INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
modTime, modTime, replication, preferredBlockSize, isStriped);

View File

@ -215,9 +215,6 @@ public int getWriteHoldCount() {
@VisibleForTesting
public final EncryptionZoneManager ezManager;
@VisibleForTesting
public final ErasureCodingZoneManager ecZoneManager;
/**
* Caches frequently used file names used in {@link INode} to reuse
* byte[] objects and reduce heap usage.
@ -314,7 +311,6 @@ public int getWriteHoldCount() {
namesystem = ns;
this.editLog = ns.getEditLog();
ezManager = new EncryptionZoneManager(this, conf);
ecZoneManager = new ErasureCodingZoneManager(this);
}
FSNamesystem getFSNamesystem() {

View File

@ -36,7 +36,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
@ -416,9 +416,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// Update the salient file attributes.
newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsDir.getFSNamesystem(), iip);
updateBlocks(fsDir, addCloseOp, iip, newFile, ecZone);
updateBlocks(fsDir, addCloseOp, iip, newFile, ecPolicy);
break;
}
case OP_CLOSE: {
@ -438,9 +438,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
// Update the salient file attributes.
file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsDir.getFSNamesystem(), iip);
updateBlocks(fsDir, addCloseOp, iip, file, ecZone);
updateBlocks(fsDir, addCloseOp, iip, file, ecPolicy);
// Now close the file
if (!file.isUnderConstruction() &&
@ -498,9 +498,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
INodesInPath iip = fsDir.getINodesInPath(path, true);
INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
// Update in-memory data structures
ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsDir.getFSNamesystem(), iip);
updateBlocks(fsDir, updateOp, iip, oldFile, ecZone);
updateBlocks(fsDir, updateOp, iip, oldFile, ecPolicy);
if (toAddRetryCache) {
fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
@ -517,9 +517,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
INodesInPath iip = fsDir.getINodesInPath(path, true);
INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
// add the new block to the INodeFile
ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
fsDir.getFSNamesystem(), iip);
addNewBlock(addBlockOp, oldFile, ecZone);
addNewBlock(addBlockOp, oldFile, ecPolicy);
break;
}
case OP_SET_REPLICATION: {
@ -961,7 +961,7 @@ private static String formatEditLogReplayError(EditLogInputStream in,
* Add a new block into the given INodeFile
*/
private void addNewBlock(AddBlockOp op, INodeFile file,
ErasureCodingZone ecZone) throws IOException {
ErasureCodingPolicy ecPolicy) throws IOException {
BlockInfo[] oldBlocks = file.getBlocks();
Block pBlock = op.getPenultimateBlock();
Block newBlock= op.getLastBlock();
@ -988,10 +988,9 @@ private void addNewBlock(AddBlockOp op, INodeFile file,
}
// add the new block
final BlockInfo newBlockInfo;
boolean isStriped = ecZone != null;
boolean isStriped = ecPolicy != null;
if (isStriped) {
newBlockInfo = new BlockInfoStriped(newBlock,
ecZone.getErasureCodingPolicy());
newBlockInfo = new BlockInfoStriped(newBlock, ecPolicy);
} else {
newBlockInfo = new BlockInfoContiguous(newBlock,
file.getPreferredBlockReplication());
@ -1008,7 +1007,7 @@ private void addNewBlock(AddBlockOp op, INodeFile file,
* @throws IOException
*/
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
INodesInPath iip, INodeFile file, ErasureCodingZone ecZone)
INodesInPath iip, INodeFile file, ErasureCodingPolicy ecPolicy)
throws IOException {
// Update its block list
BlockInfo[] oldBlocks = file.getBlocks();
@ -1068,7 +1067,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
throw new IOException("Trying to delete non-existant block " + oldBlock);
}
} else if (newBlocks.length > oldBlocks.length) {
final boolean isStriped = ecZone != null;
final boolean isStriped = ecPolicy != null;
// We're adding blocks
for (int i = oldBlocks.length; i < newBlocks.length; i++) {
Block newBlock = newBlocks[i];
@ -1078,8 +1077,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
// what about an old-version fsync() where fsync isn't called
// until several blocks in?
if (isStriped) {
newBI = new BlockInfoStriped(newBlock,
ecZone.getErasureCodingPolicy());
newBI = new BlockInfoStriped(newBlock, ecPolicy);
} else {
newBI = new BlockInfoContiguous(newBlock,
file.getPreferredBlockReplication());

View File

@ -177,7 +177,6 @@
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@ -187,7 +186,6 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@ -2133,7 +2131,7 @@ private HdfsFileStatus startFileInt(final String src,
readLock();
try {
checkOperation(OperationCategory.READ);
if (!FSDirErasureCodingOp.isInErasureCodingZone(this, src)) {
if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, src)) {
blockManager.verifyReplication(src, replication, clientMachine);
}
} finally {
@ -3206,9 +3204,8 @@ void commitOrCompleteLastBlock(
final long diff;
final short replicationFactor;
if (fileINode.isStriped()) {
final ErasureCodingZone ecZone = FSDirErasureCodingOp
.getErasureCodingZone(this, iip);
final ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy();
final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
.getErasureCodingPolicy(this, iip);
final short numDataUnits = (short) ecPolicy.getNumDataUnits();
final short numParityUnits = (short) ecPolicy.getNumParityUnits();
@ -6241,11 +6238,6 @@ public ErasureCodingPolicyManager getErasureCodingPolicyManager() {
return ecPolicyManager;
}
/** @return the ErasureCodingZoneManager. */
public ErasureCodingZoneManager getErasureCodingZoneManager() {
return dir.ecZoneManager;
}
@Override // NameNodeMXBean
public String getCorruptFiles() {
List<String> list = new ArrayList<String>();
@ -7192,15 +7184,14 @@ BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
}
/**
* Create an erasure coding zone on directory src.
* @param srcArg the path of a directory which will be the root of the
* erasure coding zone. The directory must be empty.
* @param ecPolicy erasure coding policy for the erasure coding zone
* Set an erasure coding policy on the given path.
* @param srcArg The path of the target directory.
* @param ecPolicy The erasure coding policy to set on the target directory.
* @throws AccessControlException if the caller is not the superuser.
* @throws UnresolvedLinkException if the path can't be resolved.
* @throws SafeModeException if the Namenode is in safe mode.
*/
void createErasureCodingZone(final String srcArg, final ErasureCodingPolicy
void setErasureCodingPolicy(final String srcArg, final ErasureCodingPolicy
ecPolicy, final boolean logRetryCache) throws IOException,
UnresolvedLinkException, SafeModeException, AccessControlException {
checkSuperuserPrivilege();
@ -7210,8 +7201,8 @@ void createErasureCodingZone(final String srcArg, final ErasureCodingPolicy
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot create erasure coding zone on " + srcArg);
resultingStat = FSDirErasureCodingOp.createErasureCodingZone(this,
checkNameNodeSafeMode("Cannot set erasure coding policy on " + srcArg);
resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this,
srcArg, ecPolicy, logRetryCache);
success = true;
} finally {
@ -7219,21 +7210,21 @@ void createErasureCodingZone(final String srcArg, final ErasureCodingPolicy
if (success) {
getEditLog().logSync();
}
logAuditEvent(success, "createErasureCodingZone", srcArg, null,
logAuditEvent(success, "setErasureCodingPolicy", srcArg, null,
resultingStat);
}
}
/**
* Get the erasure coding zone information for specified path
* Get the erasure coding policy information for specified path
*/
ErasureCodingZone getErasureCodingZone(String src)
ErasureCodingPolicy getErasureCodingPolicy(String src)
throws AccessControlException, UnresolvedLinkException, IOException {
checkOperation(OperationCategory.READ);
readLock();
try {
checkOperation(OperationCategory.READ);
return getErasureCodingZoneForPath(src);
return getErasureCodingPolicyForPath(src);
} finally {
readUnlock();
}
@ -7461,9 +7452,9 @@ public String getTotalSyncTimes() {
}
@Override
public ErasureCodingZone getErasureCodingZoneForPath(String src)
public ErasureCodingPolicy getErasureCodingPolicyForPath(String src)
throws IOException {
return FSDirErasureCodingOp.getErasureCodingZone(this, src);
return FSDirErasureCodingOp.getErasureCodingPolicy(this, src);
}
}

View File

@ -437,6 +437,20 @@ public final void setStoragePolicyID(byte storagePolicyId,
setStoragePolicyID(storagePolicyId);
}
/**
* @return The ID of the erasure coding policy on the file. 0 represents no
* EC policy (file is in contiguous format). 1 represents the system
* default EC policy:
* {@link ErasureCodingPolicyManager#SYS_DEFAULT_POLICY}.
* TODO: support more policies by reusing {@link HeaderFormat#REPLICATION}.
*/
@VisibleForTesting
@Override
public byte getErasureCodingPolicyID() {
return isStriped() ? (byte)1 : (byte)0;
}
/**
* @return true if the file is in the striping layout.
*/

View File

@ -20,7 +20,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
/**
* The attributes of a file.
*/
@ -32,6 +32,9 @@ public interface INodeFileAttributes extends INodeAttributes {
/** @return whether the file is striped (instead of contiguous) */
public boolean isStriped();
/** @return whether the file is striped (instead of contiguous) */
public byte getErasureCodingPolicyID();
/** @return preferred block size in bytes */
public long getPreferredBlockSize();
@ -77,6 +80,11 @@ public boolean isStriped() {
return HeaderFormat.isStriped(header);
}
@Override
public byte getErasureCodingPolicyID() {
return isStriped() ? (byte)1 : (byte)0;
}
@Override
public long getPreferredBlockSize() {
return HeaderFormat.getPreferredBlockSize(header);

View File

@ -85,7 +85,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
@ -1423,8 +1422,8 @@ public void refreshServiceAcl() throws IOException {
@Override // RefreshAuthorizationPolicyProtocol
public void refreshUserToGroupsMappings() throws IOException {
LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
getRemoteUser().getShortUserName());
LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
getRemoteUser().getShortUserName());
Groups.getUserToGroupsMappingService().refresh();
}
@ -1557,7 +1556,7 @@ public String createSnapshot(String snapshotRoot, String snapshotName)
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
null);
null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (String) cacheEntry.getPayload();
}
@ -1849,7 +1848,7 @@ public BatchedEntries<EncryptionZone> listEncryptionZones(
}
@Override // ClientProtocol
public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
throws IOException {
checkNNStartup();
final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@ -1858,7 +1857,7 @@ public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy)
}
boolean success = false;
try {
namesystem.createErasureCodingZone(src, ecPolicy, cacheEntry != null);
namesystem.setErasureCodingPolicy(src, ecPolicy, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
@ -2068,8 +2067,8 @@ public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
}
@Override // ClientProtocol
public ErasureCodingZone getErasureCodingZone(String src) throws IOException {
public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
checkNNStartup();
return namesystem.getErasureCodingZone(src);
return namesystem.getErasureCodingPolicy(src);
}
}

View File

@ -21,7 +21,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@ -52,16 +52,16 @@ public interface Namesystem extends RwLock, SafeMode {
void checkOperation(OperationCategory read) throws StandbyException;
/**
* Gets the ECZone for path
* Gets the erasure coding policy for the path
* @param src
* - path
* @return {@link ErasureCodingZone}
* @return {@link ErasureCodingPolicy}
* @throws IOException
*/
ErasureCodingZone getErasureCodingZoneForPath(String src)
ErasureCodingPolicy getErasureCodingPolicyForPath(String src)
throws IOException;
boolean isInSnapshot(BlockInfo blockUC);
CacheManager getCacheManager();
}
}

View File

@ -30,7 +30,6 @@
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.util.StringUtils;
@ -45,9 +44,9 @@ public abstract class ECCommand extends Command {
public static void registerCommands(CommandFactory factory) {
// Register all commands of Erasure CLI, with a '-' at the beginning in name
// of the command.
factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
factory.addClass(GetECZoneCommand.class, "-"
+ GetECZoneCommand.NAME);
factory.addClass(SetECPolicyCommand.class, "-" + SetECPolicyCommand.NAME);
factory.addClass(GetECPolicyCommand.class, "-"
+ GetECPolicyCommand.NAME);
factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME);
}
@ -76,17 +75,18 @@ protected void processPath(PathData item) throws IOException {
}
/**
* A command to create an EC zone for a path, with a erasure coding policy name.
* A command to set the erasure coding policy for a directory, with the name
* of the policy.
*/
static class CreateECZoneCommand extends ECCommand {
public static final String NAME = "createZone";
static class SetECPolicyCommand extends ECCommand {
public static final String NAME = "setPolicy";
public static final String USAGE = "[-s <policyName>] <path>";
public static final String DESCRIPTION =
"Create a zone to encode files using a specified policy\n"
"Set a specified erasure coding policy to a directory\n"
+ "Options :\n"
+ " -s <policyName> : erasure coding policy name to encode files. "
+ "If not passed the default policy will be used\n"
+ " <path> : Path to an empty directory. Under this directory "
+ " <path> : Path to a directory. Under this directory "
+ "files will be encoded using specified erasure coding policy";
private String ecPolicyName;
private ErasureCodingPolicy ecPolicy = null;
@ -129,23 +129,23 @@ protected void processPath(PathData item) throws IOException {
throw new HadoopIllegalArgumentException(sb.toString());
}
}
dfs.createErasureCodingZone(item.path, ecPolicy);
out.println("EC Zone created successfully at " + item.path);
dfs.setErasureCodingPolicy(item.path, ecPolicy);
out.println("EC policy set successfully at " + item.path);
} catch (IOException e) {
throw new IOException("Unable to create EC zone for the path "
throw new IOException("Unable to set EC policy for the path "
+ item.path + ". " + e.getMessage());
}
}
}
/**
* Get the information about the zone
* Get the erasure coding policy of a file or directory
*/
static class GetECZoneCommand extends ECCommand {
public static final String NAME = "getZone";
static class GetECPolicyCommand extends ECCommand {
public static final String NAME = "getPolicy";
public static final String USAGE = "<path>";
public static final String DESCRIPTION =
"Get information about the EC zone at specified path\n";
"Get erasure coding policy information about at specified path\n";
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
@ -162,14 +162,14 @@ protected void processPath(PathData item) throws IOException {
super.processPath(item);
DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
try {
ErasureCodingZone ecZone = dfs.getErasureCodingZone(item.path);
if (ecZone != null) {
out.println(ecZone.toString());
ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(item.path);
if (ecPolicy != null) {
out.println(ecPolicy.toString());
} else {
out.println("Path " + item.path + " is not in EC zone");
out.println("Path " + item.path + " is not erasure coded.");
}
} catch (IOException e) {
throw new IOException("Unable to get EC zone for the path "
throw new IOException("Unable to get EC policy for the path "
+ item.path + ". " + e.getMessage());
}
}

View File

@ -1892,12 +1892,12 @@ public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir,
int numBlocks, int numStripesPerBlk, boolean toMkdir) throws Exception {
DistributedFileSystem dfs = cluster.getFileSystem();
// If outer test already created EC zone, dir should be left as null
// If outer test already set EC policy, dir should be left as null
if (toMkdir) {
assert dir != null;
dfs.mkdirs(dir);
try {
dfs.getClient().createErasureCodingZone(dir.toString(), null);
dfs.getClient().setErasureCodingPolicy(dir.toString(), null);
} catch (IOException e) {
if (!e.getMessage().contains("non-empty directory")) {
throw e;

View File

@ -80,7 +80,7 @@ public void setup() throws IOException {
}
fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
fs.getClient().createErasureCodingZone(dirPath.toString(), null);
fs.getClient().setErasureCodingPolicy(dirPath.toString(), null);
}
@After

View File

@ -68,7 +68,7 @@ public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
fs = cluster.getFileSystem();
}

View File

@ -118,7 +118,7 @@ private void setup(Configuration conf) throws IOException {
cluster.waitActive();
dfs = cluster.getFileSystem();
dfs.mkdirs(dir);
dfs.createErasureCodingZone(dir, null);
dfs.setErasureCodingPolicy(dir, null);
}
private void tearDown() {

View File

@ -35,7 +35,7 @@
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.*;
public class TestErasureCodingZones {
public class TestErasureCodingPolicies {
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@ -59,52 +59,57 @@ public void shutdownCluster() throws IOException {
}
@Test
public void testCreateECZone()
public void testBasicSetECPolicy()
throws IOException, InterruptedException {
final Path testDir = new Path("/ec");
fs.mkdir(testDir, FsPermission.getDirDefault());
/* Normal creation of an erasure coding zone */
fs.getClient().createErasureCodingZone(testDir.toString(), null);
/* Normal creation of an erasure coding directory */
fs.getClient().setErasureCodingPolicy(testDir.toString(), null);
/* Verify files under the zone are striped */
/* Verify files under the directory are striped */
final Path ECFilePath = new Path(testDir, "foo");
fs.create(ECFilePath);
INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString());
assertTrue(inode.asFile().isStriped());
/* Verify that EC zone cannot be created on non-empty dir */
/**
* Verify that setting EC policy on non-empty directory only affects
* newly created files under the directory.
*/
final Path notEmpty = new Path("/nonEmpty");
fs.mkdir(notEmpty, FsPermission.getDirDefault());
fs.create(new Path(notEmpty, "foo"));
final Path oldFile = new Path(notEmpty, "old");
fs.create(oldFile);
fs.getClient().setErasureCodingPolicy(notEmpty.toString(), null);
final Path newFile = new Path(notEmpty, "new");
fs.create(newFile);
INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString());
assertFalse(oldInode.asFile().isStriped());
INode newInode = namesystem.getFSDirectory().getINode(newFile.toString());
assertTrue(newInode.asFile().isStriped());
/* Verify that nested EC policies not supported */
final Path dir1 = new Path("/dir1");
final Path dir2 = new Path(dir1, "dir2");
fs.mkdir(dir1, FsPermission.getDirDefault());
fs.getClient().setErasureCodingPolicy(dir1.toString(), null);
fs.mkdir(dir2, FsPermission.getDirDefault());
try {
fs.getClient().createErasureCodingZone(notEmpty.toString(), null);
fail("Erasure coding zone on non-empty dir");
fs.getClient().setErasureCodingPolicy(dir2.toString(), null);
fail("Nested erasure coding policies");
} catch (IOException e) {
assertExceptionContains("erasure coding zone for a non-empty directory", e);
assertExceptionContains("already has an erasure coding policy", e);
}
/* Verify that nested EC zones cannot be created */
final Path zone1 = new Path("/zone1");
final Path zone2 = new Path(zone1, "zone2");
fs.mkdir(zone1, FsPermission.getDirDefault());
fs.getClient().createErasureCodingZone(zone1.toString(), null);
fs.mkdir(zone2, FsPermission.getDirDefault());
try {
fs.getClient().createErasureCodingZone(zone2.toString(), null);
fail("Nested erasure coding zones");
} catch (IOException e) {
assertExceptionContains("already in an erasure coding zone", e);
}
/* Verify that EC zone cannot be created on a file */
/* Verify that EC policy cannot be set on a file */
final Path fPath = new Path("/file");
fs.create(fPath);
try {
fs.getClient().createErasureCodingZone(fPath.toString(), null);
fail("Erasure coding zone on file");
fs.getClient().setErasureCodingPolicy(fPath.toString(), null);
fail("Erasure coding policy on file");
} catch (IOException e) {
assertExceptionContains("erasure coding zone for a file", e);
assertExceptionContains("erasure coding policy for a file", e);
}
}
@ -114,8 +119,8 @@ public void testMoveValidity() throws IOException, InterruptedException {
final Path dstECDir = new Path("/dstEC");
fs.mkdir(srcECDir, FsPermission.getDirDefault());
fs.mkdir(dstECDir, FsPermission.getDirDefault());
fs.getClient().createErasureCodingZone(srcECDir.toString(), null);
fs.getClient().createErasureCodingZone(dstECDir.toString(), null);
fs.getClient().setErasureCodingPolicy(srcECDir.toString(), null);
fs.getClient().setErasureCodingPolicy(dstECDir.toString(), null);
final Path srcFile = new Path(srcECDir, "foo");
fs.create(srcFile);
@ -130,37 +135,26 @@ public void testMoveValidity() throws IOException, InterruptedException {
fs.rename(new Path("/dstEC/srcEC"), srcECDir); // move back
// Test move file
/* Verify that a file can be moved between 2 EC zones */
/* Verify that a file can be moved between 2 EC dirs */
fs.rename(srcFile, dstECDir);
fs.rename(new Path(dstECDir, "foo"), srcECDir); // move back
/* Verify that a file cannot be moved from a non-EC dir to an EC zone */
/* Verify that a file can be moved from a non-EC dir to an EC dir */
final Path nonECDir = new Path("/nonEC");
fs.mkdir(nonECDir, FsPermission.getDirDefault());
try {
fs.rename(srcFile, nonECDir);
fail("A file shouldn't be able to move from a non-EC dir to an EC zone");
} catch (IOException e) {
assertExceptionContains("can't be moved because the source and " +
"destination have different erasure coding policies", e);
}
fs.rename(srcFile, nonECDir);
/* Verify that a file cannot be moved from an EC zone to a non-EC dir */
/* Verify that a file can be moved from an EC dir to a non-EC dir */
final Path nonECFile = new Path(nonECDir, "nonECFile");
fs.create(nonECFile);
try {
fs.rename(nonECFile, dstECDir);
} catch (IOException e) {
assertExceptionContains("can't be moved because the source and " +
"destination have different erasure coding policies", e);
}
fs.rename(nonECFile, dstECDir);
}
@Test
public void testReplication() throws IOException {
final Path testDir = new Path("/ec");
fs.mkdir(testDir, FsPermission.getDirDefault());
fs.createErasureCodingZone(testDir, null);
fs.setErasureCodingPolicy(testDir, null);
final Path fooFile = new Path(testDir, "foo");
// create ec file with replication=0
fs.create(fooFile, FsPermission.getFileDefault(), true,
@ -171,23 +165,23 @@ public void testReplication() throws IOException {
}
@Test
public void testGetErasureCodingInfoWithSystemDefaultECPolicy() throws Exception {
public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Exception {
String src = "/ec";
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
// dir EC policy should be null
assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, null); //Default one will be used.
// dir EC policy after setting
fs.getClient().setErasureCodingPolicy(src, null); //Default one will be used.
ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
verifyErasureCodingInfo(src, sysDefaultECPolicy);
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
// verify for the files in ec dir
verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy);
}
@Test
public void testGetErasureCodingInfo() throws Exception {
public void testGetErasureCodingPolicy() throws Exception {
ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices();
assertTrue("System ecPolicies should be of only 1 for now",
sysECPolicies.length == 1);
@ -196,13 +190,13 @@ public void testGetErasureCodingInfo() throws Exception {
String src = "/ec2";
final Path ecDir = new Path(src);
fs.mkdir(ecDir, FsPermission.getDirDefault());
// dir ECInfo before creating ec zone
// dir ECInfo before being set
assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
// dir ECInfo after creating ec zone
fs.getClient().createErasureCodingZone(src, usingECPolicy);
// dir ECInfo after set
fs.getClient().setErasureCodingPolicy(src, usingECPolicy);
verifyErasureCodingInfo(src, usingECPolicy);
fs.create(new Path(ecDir, "child1")).close();
// verify for the files in ec zone
// verify for the files in ec dir
verifyErasureCodingInfo(src + "/child1", usingECPolicy);
}

View File

@ -38,24 +38,24 @@ public void after() {
@Test
public void testFileStatusWithECPolicy() throws Exception {
// test directory not in EC zone
// test directory doesn't have an EC policy
final Path dir = new Path("/foo");
assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
// test file not in EC zone
// test file doesn't have an EC policy
final Path file = new Path(dir, "foo");
fs.create(file).close();
assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
fs.delete(file, true);
final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
// create EC zone on dir
fs.createErasureCodingZone(dir, ecPolicy1);
// set EC policy on dir
fs.setErasureCodingPolicy(dir, ecPolicy1);
final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
assertNotNull(ecPolicy2);
assertTrue(ecPolicy1.equals(ecPolicy2));
// test file in EC zone
// test that a file created under the dir inherits the EC policy
fs.create(file).close();
final ErasureCodingPolicy ecPolicy3 =
fs.getClient().getFileInfo(file.toUri().getPath()).getErasureCodingPolicy();

View File

@ -68,7 +68,7 @@ public void setup() throws IOException {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
.numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
fs = cluster.getFileSystem();
}

View File

@ -53,7 +53,7 @@ public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
fs = cluster.getFileSystem();
}

View File

@ -78,7 +78,7 @@ public void setup() throws IOException {
cluster.waitActive();
fs = cluster.getFileSystem();
fs.getClient().createErasureCodingZone("/", null);
fs.getClient().setErasureCodingPolicy("/", null);
List<DataNode> datanodes = cluster.getDataNodes();
for (int i = 0; i < dnNum; i++) {

View File

@ -54,7 +54,7 @@ public void setup() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
cluster.waitActive();
}

View File

@ -57,7 +57,7 @@ public class TestWriteReadStripedFile {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
fs = cluster.getFileSystem();
}

View File

@ -30,7 +30,6 @@
import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.cellSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.dataBlocks;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.parityBlocks;
@ -48,7 +47,7 @@ public class TestWriteStripedFileWithFailure {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
fs = cluster.getFileSystem();
}
@ -158,4 +157,4 @@ void write(FSDataOutputStream out, int i) throws IOException {
throw new IOException("Failed at i=" + i, e);
}
}
}
}

View File

@ -82,7 +82,6 @@
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
@ -1702,7 +1701,7 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception
cluster.waitActive();
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
client.createErasureCodingZone("/", null);
client.setErasureCodingPolicy("/", null);
long totalCapacity = sum(capacities);

View File

@ -56,7 +56,7 @@ public void testRead() throws Exception {
conf = getConf();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient()
.createErasureCodingZone("/", null);
.setErasureCodingPolicy("/", null);
try {
cluster.waitActive();
doTestRead(conf, cluster, true);

View File

@ -71,7 +71,7 @@ public class TestSequentialBlockGroupId {
private MiniDFSCluster cluster;
private FileSystem fs;
private SequentialBlockGroupIdGenerator blockGrpIdGenerator;
private Path eczone = new Path("/eczone");
private Path ecDir = new Path("/ecDir");
@Before
public void setup() throws Exception {
@ -84,9 +84,9 @@ public void setup() throws Exception {
fs = cluster.getFileSystem();
blockGrpIdGenerator = cluster.getNamesystem().getBlockIdManager()
.getBlockGroupIdGenerator();
fs.mkdirs(eczone);
fs.mkdirs(ecDir);
cluster.getFileSystem().getClient()
.createErasureCodingZone("/eczone", null);
.setErasureCodingPolicy("/ecDir", null);
}
@After
@ -104,7 +104,7 @@ public void testBlockGroupIdGeneration() throws IOException {
long blockGroupIdInitialValue = blockGrpIdGenerator.getCurrentValue();
// Create a file that is 4 blocks long.
Path path = new Path(eczone, "testBlockGrpIdGeneration.dat");
Path path = new Path(ecDir, "testBlockGrpIdGeneration.dat");
DFSTestUtil.createFile(fs, path, cellSize, fileLen, blockSize, REPLICATION,
SEED);
List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
@ -134,7 +134,7 @@ public void testTriggerBlockGroupIdCollision() throws IOException {
// Create a file with a few blocks to rev up the global block ID
// counter.
Path path1 = new Path(eczone, "testBlockGrpIdCollisionDetection_file1.dat");
Path path1 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file1.dat");
DFSTestUtil.createFile(fs, path1, cellSize, fileLen, blockSize,
REPLICATION, SEED);
List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);
@ -145,7 +145,7 @@ public void testTriggerBlockGroupIdCollision() throws IOException {
blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue);
// Trigger collisions by creating a new file.
Path path2 = new Path(eczone, "testBlockGrpIdCollisionDetection_file2.dat");
Path path2 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file2.dat");
DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize,
REPLICATION, SEED);
List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
@ -204,7 +204,7 @@ public long nextValue() {
// Reset back to the initial value to trigger collision
blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue);
// Trigger collisions by creating a new file.
Path path2 = new Path(eczone, "testCollisionWithLegacyBlock_file2.dat");
Path path2 = new Path(ecDir, "testCollisionWithLegacyBlock_file2.dat");
DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize,
REPLICATION, SEED);
List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);

View File

@ -470,8 +470,8 @@ public void testMoverWithStripedFile() throws Exception {
client.mkdirs(barDir, new FsPermission((short) 777), true);
client.setStoragePolicy(barDir,
HdfsConstants.HOT_STORAGE_POLICY_NAME);
// set "/bar" directory with EC zone.
client.createErasureCodingZone(barDir, null);
// set an EC policy on "/bar" directory
client.setErasureCodingPolicy(barDir, null);
// write file to barDir
final String fooFile = "/bar/foo";

View File

@ -30,7 +30,6 @@
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.junit.After;
@ -39,7 +38,6 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import static org.junit.Assert.assertEquals;
@ -70,7 +68,7 @@ public void setup() throws IOException {
cluster.waitActive();
fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
fs.getClient().createErasureCodingZone(dirPath.toString(), null);
fs.getClient().setErasureCodingPolicy(dirPath.toString(), null);
}
@After

View File

@ -74,7 +74,7 @@ public void setup() throws IOException {
.numDataNodes(GROUP_SIZE).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
dfs.getClient().createErasureCodingZone("/", null);
dfs.getClient().setErasureCodingPolicy("/", null);
}
@After

View File

@ -452,7 +452,7 @@ public void testAddNewStripedBlock() throws IOException{
//set the storage policy of the directory
fs.mkdir(new Path(testDir), new FsPermission("755"));
fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
fs.getClient().getNamenode().setErasureCodingPolicy(testDir, null);
// Create a file with striped block
Path p = new Path(testFilePath);
@ -524,7 +524,7 @@ public void testUpdateStripedBlocks() throws IOException{
//set the storage policy of the directory
fs.mkdir(new Path(testDir), new FsPermission("755"));
fs.getClient().getNamenode().createErasureCodingZone(testDir, null);
fs.getClient().getNamenode().setErasureCodingPolicy(testDir, null);
//create a file with striped blocks
Path p = new Path(testFilePath);

View File

@ -141,7 +141,7 @@ private void testPersistHelper(Configuration conf) throws IOException {
private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
boolean isUC) throws IOException{
// contruct a INode with StripedBlock for saving and loading
fsn.createErasureCodingZone("/", null, false);
fsn.setErasureCodingPolicy("/", null, false);
long id = 123456789;
byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
@ -425,7 +425,7 @@ public void testSupportBlockGroup() throws IOException {
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
fs.getClient().getNamenode().createErasureCodingZone("/", null);
fs.getClient().getNamenode().setErasureCodingPolicy("/", null);
Path file = new Path("/striped");
FSDataOutputStream out = fs.create(file);
byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);

View File

@ -29,7 +29,6 @@
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Assert;
@ -66,7 +65,7 @@ public void setUp() throws IOException {
dfs = cluster.getFileSystem();
dfs.mkdirs(ecDir);
dfs.getClient().createErasureCodingZone(ecDir.toString(), ecPolicy);
dfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy);
dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);

View File

@ -217,8 +217,8 @@ public void testDeleteOp() throws Exception {
try {
final int len = 1024;
final Path parentDir = new Path("/parentDir");
final Path zone = new Path(parentDir, "zone");
final Path zoneFile = new Path(zone, "zoneFile");
final Path ecDir = new Path(parentDir, "ecDir");
final Path ecFile = new Path(ecDir, "ecFile");
final Path contiguousFile = new Path(parentDir, "someFile");
final DistributedFileSystem dfs;
final Configuration conf = new Configuration();
@ -232,18 +232,18 @@ public void testDeleteOp() throws Exception {
FSNamesystem fsn = cluster.getNamesystem();
dfs = cluster.getFileSystem();
dfs.mkdirs(zone);
dfs.mkdirs(ecDir);
// create erasure zone
dfs.createErasureCodingZone(zone, null);
DFSTestUtil.createFile(dfs, zoneFile, len, (short) 1, 0xFEED);
// set erasure coding policy
dfs.setErasureCodingPolicy(ecDir, null);
DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
final FSDirectory fsd = fsn.getFSDirectory();
// Case-1: Verify the behavior of striped blocks
// Get blocks of striped file
INode inodeStriped = fsd.getINode("/parentDir/zone/zoneFile");
assertTrue("Failed to get INodeFile for /parentDir/zone/zoneFile",
INode inodeStriped = fsd.getINode("/parentDir/ecDir/ecFile");
assertTrue("Failed to get INodeFile for /parentDir/ecDir/ecFile",
inodeStriped instanceof INodeFile);
INodeFile inodeStripedFile = (INodeFile) inodeStriped;
BlockInfo[] stripedBlks = inodeStripedFile.getBlocks();
@ -252,8 +252,8 @@ public void testDeleteOp() throws Exception {
blockInfo.isDeleted());
}
// delete erasure zone directory
dfs.delete(zone, true);
// delete directory with erasure coding policy
dfs.delete(ecDir, true);
for (BlockInfo blockInfo : stripedBlks) {
assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted());
}

View File

@ -60,7 +60,7 @@ public static void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
fs = cluster.getFileSystem();
Path eczone = new Path("/eczone");
fs.mkdirs(eczone);

View File

@ -48,39 +48,39 @@
</test>
<test>
<description>help: createZone command</description>
<description>help: setPolicy command</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -help createZone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -help setPolicy</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^[ \t]*Create a zone to encode files using a specified policy( )*</expected-output>
<expected-output>^[ \t]*Set a specified erasure coding policy to a directory( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-createZone \[-s &lt;policyName&gt;\] &lt;path&gt;(.)*</expected-output>
<expected-output>^-setPolicy \[-s &lt;policyName&gt;\] &lt;path&gt;(.)*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>help: getZone command</description>
<description>help: getPolicy command</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -help getZone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -help getPolicy</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Get information about the EC zone at specified path</expected-output>
<expected-output>Get erasure coding policy information about at specified path</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-getZone &lt;path&gt;(.)*</expected-output>
<expected-output>^-getPolicy &lt;path&gt;(.)*</expected-output>
</comparator>
</comparators>
</test>
@ -106,63 +106,63 @@
<!-- Test erasure code commands -->
<test>
<description>createZone : create a zone to encode files</description>
<description>setPolicy : set erasure coding policy on a directory to encode files</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>EC Zone created successfully at NAMENODE/eczone</expected-output>
<expected-output>EC policy set successfully at NAMENODE/ecdir</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>createZone : create a zone twice</description>
<description>setPolicy : set a policy twice</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
<ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Directory /eczone is already in an erasure coding zone</expected-output>
<expected-output>Directory /ecdir already has an erasure coding policy</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>createZone : default policy</description>
<description>setPolicy : default policy</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
<expected-output>ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getZone : get information about the EC zone at specified path not in zone</description>
<description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
<test-commands>
<command>-fs NAMENODE -mkdir /noec</command>
<ec-admin-command>-fs NAMENODE -getZone /noec</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getPolicy /noec</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /noec</command>
@ -170,45 +170,45 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Path NAMENODE/noec is not in EC zone</expected-output>
<expected-output>Path NAMENODE/noec is not erasure coded</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getZone : get information about the EC zone at specified path</description>
<description>getPolicy : get EC policy information at specified path, which has an EC policy</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir</ec-admin-command>
<ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /eczone</command>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
<expected-output>ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getZone : get EC zone at specified file path</description>
<description>getPolicy : get EC policy information at specified file path under an EC directory</description>
<test-commands>
<command>-fs NAMENODE -mkdir /eczone</command>
<ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
<command>-fs NAMENODE -touchz /eczone/ecfile</command>
<ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir</ec-admin-command>
<command>-fs NAMENODE -touchz /ecdir/ecfile</command>
<ec-admin-command>-fs NAMENODE -getPolicy /ecdir/ecfile</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /eczone/ecfile</command>
<command>-fs NAMENODE -rmdir /eczone</command>
<command>-fs NAMENODE -rm /ecdir/ecfile</command>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
<expected-output>ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
</comparator>
</comparators>
</test>
@ -230,64 +230,64 @@
<!-- Test illegal parameters -->
<test>
<description>setPolicy : illegal parameters - path is missing</description>
<test-commands>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-setPolicy: &lt;path&gt; is missing(.)*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setPolicy : illegal parameters - policy name is missing</description>
<test-commands>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy -s</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-setPolicy: option -s requires 1 argument(.)*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setPolicy : illegal parameters - too many arguments</description>
<test-commands>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy /ecdir1 /ecdir2</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>-setPolicy: Too many arguments</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>setPolicy : illegal parameters - invalidpolicy</description>
<test-commands>
<command>-fs NAMENODE -mkdir /ecdir</command>
<ec-admin-command>-fs NAMENODE -setPolicy -s invalidpolicy /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rmdir /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
@ -298,62 +298,62 @@
</test>
<test>
<description>setPolicy : illegal parameters - no such file</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^setPolicy: `/ecdir': No such file or directory(.)*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getPolicy : illegal parameters - path is missing</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -getPolicy </ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-getPolicy: &lt;path&gt; is missing(.)*</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getPolicy : illegal parameters - too many arguments</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -getPolicy /ecdir /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm /ecdir</command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>-getPolicy: Too many arguments</expected-output>
</comparator>
</comparators>
</test>
<test>
<description>getPolicy : illegal parameters - no such file</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^getPolicy: `/ecdir': No such file or directory(.)*</expected-output>
</comparator>
</comparators>
</test>
@ -361,7 +361,7 @@
<test>
<description>listPolicies : illegal parameters - too many parameters</description>
<test-commands>
<ec-admin-command>-fs NAMENODE -listPolicies /ecdir</ec-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>