diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 7756bb94f25..a0504a8fba3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1485,11 +1485,13 @@ public interface ClientProtocol { EventBatchList getEditsFromTxid(long txid) throws IOException; /** - * Create an erasure coding zone with specified policy, if any, otherwise - * default + * Set an erasure coding policy on a specified path. + * @param src The path to set the policy on. + * @param ecPolicy The erasure coding policy. If null, the default policy will + * be used. */ @AtMostOnce - void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy) + void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) throws IOException; /** @@ -1501,11 +1503,11 @@ public interface ClientProtocol { ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException; /** - * Get the information about the EC zone for the path - * + * Get the information about the EC policy for the path. + * * @param src path to get the info for * @throws IOException */ @Idempotent - ErasureCodingZone getErasureCodingZone(String src) throws IOException; + ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java deleted file mode 100644 index 533b630893c..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdfs.protocol; - -/** - * Information about the EC Zone at the specified path. - */ -public class ErasureCodingZone { - - private String dir; - private ErasureCodingPolicy ecPolicy; - - public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) { - this.dir = dir; - this.ecPolicy = ecPolicy; - } - - /** - * Get directory of the EC zone.
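(Review aside: the ClientProtocol rename above is mirrored out to DFSClient, DistributedFileSystem and HdfsAdmin later in this patch. For the end-to-end picture, here is a minimal, illustrative usage sketch of the renamed client API. The path name, the cast, and the cluster setup are assumptions for illustration, not part of the patch; per the javadoc above, a null policy falls back to the system default.)

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

  public class EcPolicyUsageSketch {
    public static void main(String[] args) throws Exception {
      // Assumes fs.defaultFS points at an HDFS cluster.
      Configuration conf = new Configuration();
      try (DistributedFileSystem dfs =
          (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
        Path dir = new Path("/striped");   // hypothetical directory
        dfs.mkdirs(dir);
        // Passing null lets the NameNode apply the system default policy.
        dfs.setErasureCodingPolicy(dir, null);
        ErasureCodingPolicy p = dfs.getErasureCodingPolicy(dir);
        System.out.println("EC policy on " + dir + ": " + p);
      }
    }
  }
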
- * - * @return - */ - public String getDir() { - return dir; - } - - /** - * Get the erasure coding policy for the EC Zone - * - * @return - */ - public ErasureCodingPolicy getErasureCodingPolicy() { - return ecPolicy; - } - - @Override - public String toString() { - return "Dir: " + getDir() + ", Policy: " + ecPolicy; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index fb10e9c7d04..6a140ebe7ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -867,14 +867,14 @@ service ClientNamenodeProtocol { returns(ListEncryptionZonesResponseProto); rpc getEZForPath(GetEZForPathRequestProto) returns(GetEZForPathResponseProto); - rpc createErasureCodingZone(CreateErasureCodingZoneRequestProto) - returns(CreateErasureCodingZoneResponseProto); + rpc setErasureCodingPolicy(SetErasureCodingPolicyRequestProto) + returns(SetErasureCodingPolicyResponseProto); rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto) returns(GetCurrentEditLogTxidResponseProto); rpc getEditsFromTxid(GetEditsFromTxidRequestProto) returns(GetEditsFromTxidResponseProto); rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto) returns(GetErasureCodingPoliciesResponseProto); - rpc getErasureCodingZone(GetErasureCodingZoneRequestProto) - returns(GetErasureCodingZoneResponseProto); + rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto) + returns(GetErasureCodingPolicyResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto index d27f7828f48..fa24aefa11d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto @@ -23,20 +23,12 @@ package hadoop.hdfs; import "hdfs.proto"; -/** - * ErasureCodingZone - */ -message ErasureCodingZoneProto { - required string dir = 1; - required ErasureCodingPolicyProto ecPolicy = 2; -} - -message CreateErasureCodingZoneRequestProto { +message SetErasureCodingPolicyRequestProto { required string src = 1; optional ErasureCodingPolicyProto ecPolicy = 2; } -message CreateErasureCodingZoneResponseProto { +message SetErasureCodingPolicyResponseProto { } message GetErasureCodingPoliciesRequestProto { // void request @@ -46,12 +38,12 @@ message GetErasureCodingPoliciesResponseProto { repeated ErasureCodingPolicyProto ecPolicies = 1; } -message GetErasureCodingZoneRequestProto { - required string src = 1; // path to get the zone info +message GetErasureCodingPolicyRequestProto { + required string src = 1; // path to get the policy info } -message GetErasureCodingZoneResponseProto { - optional ErasureCodingZoneProto ECZone = 1; +message GetErasureCodingPolicyResponseProto { + optional ErasureCodingPolicyProto ecPolicy = 1; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt index 9fdf3ed949d..2f133103e63 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt @@ -412,3 +412,6 @@ HDFS-8978. Erasure coding: fix 2 failed tests of DFSStripedOutputStream. (Walter Su via jing9) + + HDFS-8833. 
Erasure coding: store EC schema and cell size in INodeFile and + eliminate notion of EC zones. (zhz) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 12120f4b928..4d28dca1015 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -114,7 +114,6 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -2901,12 +2900,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, return new EncryptionZoneIterator(namenode, traceSampler); } - public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy) + + public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) throws IOException { checkOpen(); - TraceScope scope = getPathTraceScope("createErasureCodingZone", src); + TraceScope scope = getPathTraceScope("setErasureCodingPolicy", src); try { - namenode.createErasureCodingZone(src, ecPolicy); + namenode.setErasureCodingPolicy(src, ecPolicy); } catch (RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, SafeModeException.class, @@ -3223,17 +3223,19 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } /** - * Get the erasure coding zone information for the specified path - * + * Get the erasure coding policy information for the specified path + * * @param src path to get the information for - * @return Returns the zone information if path is in EC Zone, null otherwise + * @return Returns the policy information if file or directory on the path is + * erasure coded, null otherwise * @throws IOException */ - public ErasureCodingZone getErasureCodingZone(String src) throws IOException { + + public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException { checkOpen(); - TraceScope scope = getPathTraceScope("getErasureCodingZone", src); + TraceScope scope = getPathTraceScope("getErasureCodingPolicy", src); try { - return namenode.getErasureCodingZone(src); + return namenode.getErasureCodingPolicy(src); } catch (RemoteException re) { throw re.unwrapRemoteException(FileNotFoundException.class, AccessControlException.class, UnresolvedPathException.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 1f8ab448697..903f763d720 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -73,7 +73,6 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import 
org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -2263,20 +2262,20 @@ public class DistributedFileSystem extends FileSystem { } /** - * Create the erasurecoding zone - * - * @param path Directory to create the ec zone - * @param ecPolicy erasure coding policy for the zone. If not specified default will be used. + * Set the given erasure coding policy on the specified path. + * + * @param path The directory to set the policy on + * @param ecPolicy The erasure coding policy. If not specified, the default will be used. * @throws IOException */ - public void createErasureCodingZone(final Path path, final ErasureCodingPolicy ecPolicy) + public void setErasureCodingPolicy(final Path path, final ErasureCodingPolicy ecPolicy) throws IOException { Path absF = fixRelativePart(path); new FileSystemLinkResolver<Void>() { @Override public Void doCall(final Path p) throws IOException, UnresolvedLinkException { - dfs.createErasureCodingZone(getPathName(p), ecPolicy); + dfs.setErasureCodingPolicy(getPathName(p), ecPolicy); return null; } @@ -2284,42 +2283,43 @@ public class DistributedFileSystem extends FileSystem { public Void next(final FileSystem fs, final Path p) throws IOException { if (fs instanceof DistributedFileSystem) { DistributedFileSystem myDfs = (DistributedFileSystem) fs; - myDfs.createErasureCodingZone(p, ecPolicy); + myDfs.setErasureCodingPolicy(p, ecPolicy); return null; } throw new UnsupportedOperationException( - "Cannot createErasureCodingZone through a symlink to a " + "Cannot setErasureCodingPolicy through a symlink to a " + "non-DistributedFileSystem: " + path + " -> " + p); } }.resolve(this, absF); } /** - * Get ErasureCoding zone information for the specified path - * - * @param path - * @return Returns the zone information if path is in EC zone, null otherwise + * Get erasure coding policy information for the specified path. + * + * @param path The path of the file or directory + * @return Returns the policy information if file or directory on the path + * is erasure coded, null otherwise + * @throws IOException */ - public ErasureCodingZone getErasureCodingZone(final Path path) + public ErasureCodingPolicy getErasureCodingPolicy(final Path path) throws IOException { Path absF = fixRelativePart(path); - return new FileSystemLinkResolver<ErasureCodingZone>() { + return new FileSystemLinkResolver<ErasureCodingPolicy>() { @Override - public ErasureCodingZone doCall(final Path p) throws IOException, + public ErasureCodingPolicy doCall(final Path p) throws IOException, UnresolvedLinkException { - return dfs.getErasureCodingZone(getPathName(p)); + return dfs.getErasureCodingPolicy(getPathName(p)); } @Override - public ErasureCodingZone next(final FileSystem fs, final Path p) + public ErasureCodingPolicy next(final FileSystem fs, final Path p) throws IOException { if (fs instanceof DistributedFileSystem) { DistributedFileSystem myDfs = (DistributedFileSystem) fs; - return myDfs.getErasureCodingZone(p); + return myDfs.getErasureCodingPolicy(p); } throw new UnsupportedOperationException( - "Cannot getErasureCodingZone through a symlink to a " + "Cannot getErasureCodingPolicy through a symlink to a " + "non-DistributedFileSystem: " + path + " -> " + p); } }.resolve(this, absF); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index e6e67cbbe2c..7b055fc8f64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.EncryptionZone; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.hdfs.tools.DFSAdmin; @@ -367,31 +366,33 @@ public class HdfsAdmin { } /** - * Create the ErasureCoding zone + * Set the given erasure coding policy on the source path. * - * @param path Directory to create the ErasureCoding zone - * @param ecPolicy erasure coding policy for the zone. If null, the default will be used. + * @param path The source path referring to a directory. + * @param ecPolicy The erasure coding policy for the directory. + * If null, the default will be used. * @throws IOException */ - public void createErasureCodingZone(final Path path, + public void setErasureCodingPolicy(final Path path, final ErasureCodingPolicy ecPolicy) throws IOException { - dfs.createErasureCodingZone(path, ecPolicy); + dfs.setErasureCodingPolicy(path, ecPolicy); } /** - * Get the ErasureCoding zone information for the specified path + * Get the erasure coding policy information for the specified path. * * @param path - * @return Returns the zone information if path is in EC zone, null otherwise + * @return Returns the policy information if file or directory on the path is + * erasure coded, null otherwise. * @throws IOException */ - public ErasureCodingZone getErasureCodingZone(final Path path) + public ErasureCodingPolicy getErasureCodingPolicy(final Path path) throws IOException { - return dfs.getErasureCodingZone(path); + return dfs.getErasureCodingPolicy(path); } /** - * Get the ErasureCoding policies supported. + * Get the erasure coding policies supported. * * @throws IOException */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java deleted file mode 100644 index 533b630893c..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdfs.protocol; - -/** - * Information about the EC Zone at the specified path.
- */ -public class ErasureCodingZone { - - private String dir; - private ErasureCodingPolicy ecPolicy; - - public ErasureCodingZone(String dir, ErasureCodingPolicy ecPolicy) { - this.dir = dir; - this.ecPolicy = ecPolicy; - } - - /** - * Get directory of the EC zone. - * - * @return - */ - public String getDir() { - return dir; - } - - /** - * Get the erasure coding policy for the EC Zone - * - * @return - */ - public ErasureCodingPolicy getErasureCodingPolicy() { - return ecPolicy; - } - - @Override - public String toString() { - return "Dir: " + getDir() + ", Policy: " + ecPolicy; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 97445a6bf1d..32174849e64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -203,10 +202,10 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptio import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; @@ -1402,14 +1401,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } @Override - public CreateErasureCodingZoneResponseProto createErasureCodingZone( - RpcController controller, CreateErasureCodingZoneRequestProto req) + public SetErasureCodingPolicyResponseProto setErasureCodingPolicy( + RpcController controller, SetErasureCodingPolicyRequestProto req) throws ServiceException { try { ErasureCodingPolicy ecPolicy = 
req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req .getEcPolicy()) : null; - server.createErasureCodingZone(req.getSrc(), ecPolicy); - return CreateErasureCodingZoneResponseProto.newBuilder().build(); + server.setErasureCodingPolicy(req.getSrc(), ecPolicy); + return SetErasureCodingPolicyResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); } @@ -1554,13 +1553,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } @Override - public GetErasureCodingZoneResponseProto getErasureCodingZone(RpcController controller, - GetErasureCodingZoneRequestProto request) throws ServiceException { + public GetErasureCodingPolicyResponseProto getErasureCodingPolicy(RpcController controller, + GetErasureCodingPolicyRequestProto request) throws ServiceException { try { - ErasureCodingZone ecZone = server.getErasureCodingZone(request.getSrc()); - GetErasureCodingZoneResponseProto.Builder builder = GetErasureCodingZoneResponseProto.newBuilder(); - if (ecZone != null) { - builder.setECZone(PBHelper.convertErasureCodingZone(ecZone)); + ErasureCodingPolicy ecPolicy = server.getErasureCodingPolicy(request.getSrc()); + GetErasureCodingPolicyResponseProto.Builder builder = GetErasureCodingPolicyResponseProto.newBuilder(); + if (ecPolicy != null) { + builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy)); } return builder.build(); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index c8c468ecfca..841924464ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -168,9 +167,9 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto; @@ -1420,17 +1419,17 @@ public class ClientNamenodeProtocolTranslatorPB implements } @Override - public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy) + public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) throws IOException { - final CreateErasureCodingZoneRequestProto.Builder builder = - CreateErasureCodingZoneRequestProto.newBuilder(); + final SetErasureCodingPolicyRequestProto.Builder builder = + SetErasureCodingPolicyRequestProto.newBuilder(); builder.setSrc(src); if (ecPolicy != null) { builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy)); } - CreateErasureCodingZoneRequestProto req = builder.build(); + SetErasureCodingPolicyRequestProto req = builder.build(); try { - rpcProxy.createErasureCodingZone(null, req); + rpcProxy.setErasureCodingPolicy(null, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -1577,14 +1576,14 @@ public class ClientNamenodeProtocolTranslatorPB implements } @Override - public ErasureCodingZone getErasureCodingZone(String src) throws IOException { - GetErasureCodingZoneRequestProto req = GetErasureCodingZoneRequestProto.newBuilder() + public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException { + GetErasureCodingPolicyRequestProto req = GetErasureCodingPolicyRequestProto.newBuilder() .setSrc(src).build(); try { - GetErasureCodingZoneResponseProto response = rpcProxy.getErasureCodingZone( + GetErasureCodingPolicyResponseProto response = rpcProxy.getErasureCodingPolicy( null, req); - if (response.hasECZone()) { - return PBHelper.convertErasureCodingZone(response.getECZone()); + if (response.hasEcPolicy()) { + return PBHelper.convertErasureCodingPolicy(response.getEcPolicy()); } return null; } catch (ServiceException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index f419c46ef3e..ce39e152f96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -75,7 +75,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -132,7 +131,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterComm import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto; -import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ErasureCodingZoneProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto; import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto; @@ -2975,19 +2973,6 @@ public class PBHelper { .setCellSize(policy.getCellSize()); return builder.build(); } - - public static ErasureCodingZoneProto convertErasureCodingZone( - ErasureCodingZone ecZone) { - return ErasureCodingZoneProto.newBuilder().setDir(ecZone.getDir()) - .setEcPolicy(convertErasureCodingPolicy(ecZone.getErasureCodingPolicy())) - .build(); - } - - public static ErasureCodingZone convertErasureCodingZone( - ErasureCodingZoneProto ecZoneProto) { - return new ErasureCodingZone(ecZoneProto.getDir(), - convertErasureCodingPolicy(ecZoneProto.getEcPolicy())); - } public static BlockECRecoveryInfo convertBlockECRecoveryInfo( BlockECRecoveryInfoProto blockEcRecoveryInfoProto) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 8232ab9c9ff..3c1c4612e3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -55,7 +55,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -924,11 +923,9 @@ public class BlockManager implements BlockStatsMXBean { final boolean isFileUnderConstruction, final long offset, final long length, final boolean needBlockToken, final boolean inSnapshot, FileEncryptionInfo feInfo, - ErasureCodingZone ecZone) + ErasureCodingPolicy ecPolicy) throws IOException { assert namesystem.hasReadLock(); - final ErasureCodingPolicy ecPolicy = ecZone != null ? ecZone - .getErasureCodingPolicy() : null; if (blocks == null) { return null; } else if (blocks.length == 0) { @@ -1607,14 +1604,14 @@ public class BlockManager implements BlockStatsMXBean { assert rw instanceof ErasureCodingWork; assert rw.getTargets().length > 0; String src = getBlockCollection(block).getName(); - ErasureCodingZone ecZone = null; + ErasureCodingPolicy ecPolicy = null; try { - ecZone = namesystem.getErasureCodingZoneForPath(src); + ecPolicy = namesystem.getErasureCodingPolicyForPath(src); } catch (IOException e) { blockLog - .warn("Failed to get the EC zone for the file {} ", src); + .warn("Failed to get EC policy for the file {} ", src); } - if (ecZone == null) { + if (ecPolicy == null) { blockLog.warn("No erasure coding policy found for the file {}. 
" + "So cannot proceed for recovery", src); // TODO: we may have to revisit later for what we can do better to @@ -1624,8 +1621,7 @@ public class BlockManager implements BlockStatsMXBean { rw.getTargets()[0].getDatanodeDescriptor().addBlockToBeErasureCoded( new ExtendedBlock(namesystem.getBlockPoolId(), block), rw.getSrcNodes(), rw.getTargets(), - ((ErasureCodingWork) rw).getLiveBlockIndicies(), - ecZone.getErasureCodingPolicy()); + ((ErasureCodingWork) rw).getLiveBlockIndicies(), ecPolicy); } else { rw.getSrcNodes()[0].addBlockToBeReplicated(block, targets); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index da5271710d4..eb8ea8a0b21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -387,8 +387,8 @@ public interface HdfsServerConstants { "raw.hdfs.crypto.file.encryption.info"; String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER = "security.hdfs.unreadable.by.superuser"; - String XATTR_ERASURECODING_ZONE = - "raw.hdfs.erasurecoding.zone"; + String XATTR_ERASURECODING_POLICY = + "raw.hdfs.erasurecoding.policy"; long BLOCK_GROUP_INDEX_MASK = 15; byte MAX_BLOCKS_IN_GROUP = 16; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 71ac36a79bb..b77279b4f0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -83,7 +83,7 @@ public final class ErasureCodingPolicyManager { /** * Get system-wide default policy, which can be used by default - * when no policy is specified for an EC zone. + * when no policy is specified for a path. * @return ecPolicy */ public static ErasureCodingPolicy getSystemDefaultPolicy() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java deleted file mode 100644 index a0ac033b9be..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java +++ /dev/null @@ -1,163 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.namenode; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - -import org.apache.hadoop.fs.XAttr; -import org.apache.hadoop.fs.XAttrSetFlag; -import org.apache.hadoop.hdfs.XAttrHelper; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.WritableUtils; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.EnumSet; -import java.util.List; - -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_ZONE; - -/** - * Manages the list of erasure coding zones in the filesystem. - *

- * The ErasureCodingZoneManager has its own lock, but relies on the FSDirectory - * lock being held for many operations. The FSDirectory lock should not be - * taken if the manager lock is already held. - * TODO: consolidate zone logic w/ encrypt. zones {@link EncryptionZoneManager} - */ -public class ErasureCodingZoneManager { - private final FSDirectory dir; - - /** - * Construct a new ErasureCodingZoneManager. - * - * @param dir Enclosing FSDirectory - */ - public ErasureCodingZoneManager(FSDirectory dir) { - this.dir = dir; - } - - ErasureCodingPolicy getErasureCodingPolicy(INodesInPath iip) throws IOException { - ErasureCodingZone ecZone = getErasureCodingZone(iip); - return ecZone == null ? null : ecZone.getErasureCodingPolicy(); - } - - ErasureCodingZone getErasureCodingZone(INodesInPath iip) throws IOException { - assert dir.hasReadLock(); - Preconditions.checkNotNull(iip, "INodes cannot be null"); - List inodes = iip.getReadOnlyINodes(); - for (int i = inodes.size() - 1; i >= 0; i--) { - final INode inode = inodes.get(i); - if (inode == null) { - continue; - } - // We don't allow symlinks in an EC zone, or pointing to a file/dir in - // an EC. Therefore if a symlink is encountered, the dir shouldn't have - // EC - // TODO: properly support symlinks in EC zones - if (inode.isSymlink()) { - return null; - } - final List xAttrs = inode.getXAttrFeature() == null ? - new ArrayList(0) - : inode.getXAttrFeature().getXAttrs(); - for (XAttr xAttr : xAttrs) { - if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixedName(xAttr))) { - ByteArrayInputStream bIn=new ByteArrayInputStream(xAttr.getValue()); - DataInputStream dIn=new DataInputStream(bIn); - String ecPolicyName = WritableUtils.readString(dIn); - ErasureCodingPolicy ecPolicy = dir.getFSNamesystem() - .getErasureCodingPolicyManager().getPolicy(ecPolicyName); - return new ErasureCodingZone(dir.getInode(inode.getId()) - .getFullPathName(), ecPolicy); - } - } - } - return null; - } - - List createErasureCodingZone(final INodesInPath srcIIP, - ErasureCodingPolicy ecPolicy) throws IOException { - assert dir.hasWriteLock(); - Preconditions.checkNotNull(srcIIP, "INodes cannot be null"); - String src = srcIIP.getPath(); - if (dir.isNonEmptyDirectory(srcIIP)) { - throw new IOException( - "Attempt to create an erasure coding zone for a " + - "non-empty directory " + src); - } - if (srcIIP.getLastINode() != null && - !srcIIP.getLastINode().isDirectory()) { - throw new IOException("Attempt to create an erasure coding zone " + - "for a file " + src); - } - if (getErasureCodingPolicy(srcIIP) != null) { - throw new IOException("Directory " + src + " is already in an " + - "erasure coding zone."); - } - - // System default erasure coding policy will be used since no specified. 
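(Review aside: note that the persisted format survives this change. Both the zone manager being deleted here and its replacement, createErasureCodingPolicyXAttr in FSDirErasureCodingOp below, store just the policy name, written with WritableUtils into the xattr value; only the xattr key moves from raw.hdfs.erasurecoding.zone to raw.hdfs.erasurecoding.policy. A self-contained round-trip of that byte encoding, runnable outside the NameNode; the policy name is illustrative only:)

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import org.apache.hadoop.io.WritableUtils;

  public class EcXAttrValueSketch {
    public static void main(String[] args) throws Exception {
      // Encode: this byte[] is what gets stored as the xattr value.
      ByteArrayOutputStream bOut = new ByteArrayOutputStream();
      DataOutputStream dOut = new DataOutputStream(bOut);
      WritableUtils.writeString(dOut, "RS-6-3-64k"); // hypothetical policy name
      byte[] value = bOut.toByteArray();

      // Decode: mirrors what the lookup side does with the xattr value.
      DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(value));
      System.out.println(WritableUtils.readString(dIn));
    }
  }
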
- if (ecPolicy == null) { - ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy(); - } - - final XAttr ecXAttr; - DataOutputStream dOut = null; - try { - ByteArrayOutputStream bOut = new ByteArrayOutputStream(); - dOut = new DataOutputStream(bOut); - WritableUtils.writeString(dOut, ecPolicy.getName()); - ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_ZONE, - bOut.toByteArray()); - } finally { - IOUtils.closeStream(dOut); - } - final List xattrs = Lists.newArrayListWithCapacity(1); - xattrs.add(ecXAttr); - FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs, - EnumSet.of(XAttrSetFlag.CREATE)); - return xattrs; - } - - void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src) - throws IOException { - assert dir.hasReadLock(); - final ErasureCodingZone srcZone = getErasureCodingZone(srcIIP); - final ErasureCodingZone dstZone = getErasureCodingZone(dstIIP); - if (srcZone != null && srcZone.getDir().equals(src) && dstZone == null) { - return; - } - final ErasureCodingPolicy srcECPolicy = - srcZone != null ? srcZone.getErasureCodingPolicy() : null; - final ErasureCodingPolicy dstECPolicy = - dstZone != null ? dstZone.getErasureCodingPolicy() : null; - if (srcECPolicy != null && !srcECPolicy.equals(dstECPolicy) || - dstECPolicy != null && !dstECPolicy.equals(srcECPolicy)) { - throw new IOException( - src + " can't be moved because the source and destination have " + - "different erasure coding policies."); - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 8c515d16416..4162760d2a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -17,14 +17,27 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.IOException; +import java.util.ArrayList; +import java.util.EnumSet; import java.util.List; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.WritableUtils; + +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY; /** * Helper class to perform erasure coding related operations. @@ -38,18 +51,17 @@ final class FSDirErasureCodingOp { private FSDirErasureCodingOp() {} /** - * Create an erasure coding zone on directory src. + * Set an erasure coding policy on the given path. * - * @param fsn namespace - * @param srcArg the path of a directory which will be the root of the - * erasure coding zone. The directory must be empty. - * @param ecPolicy erasure coding policy for the erasure coding zone + * @param fsn The namespace + * @param srcArg The path of the target directory. 
+ * @param ecPolicy The erasure coding policy to set on the target directory. * @param logRetryCache whether to record RPC ids in editlog for retry * cache rebuilding * @return {@link HdfsFileStatus} * @throws IOException */ - static HdfsFileStatus createErasureCodingZone(final FSNamesystem fsn, + static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn, final String srcArg, final ErasureCodingPolicy ecPolicy, final boolean logRetryCache) throws IOException { assert fsn.hasWriteLock(); @@ -66,8 +78,7 @@ final class FSDirErasureCodingOp { fsd.writeLock(); try { iip = fsd.getINodesInPath4Write(src, false); - xAttrs = fsn.getErasureCodingZoneManager().createErasureCodingZone( - iip, ecPolicy); + xAttrs = createErasureCodingPolicyXAttr(fsn, iip, ecPolicy); } finally { fsd.writeUnlock(); } @@ -75,62 +86,83 @@ final class FSDirErasureCodingOp { return fsd.getAuditFileInfo(iip); } + static List<XAttr> createErasureCodingPolicyXAttr(final FSNamesystem fsn, + final INodesInPath srcIIP, ErasureCodingPolicy ecPolicy) throws IOException { + FSDirectory fsd = fsn.getFSDirectory(); + assert fsd.hasWriteLock(); + Preconditions.checkNotNull(srcIIP, "INodes cannot be null"); + String src = srcIIP.getPath(); + if (srcIIP.getLastINode() != null && + !srcIIP.getLastINode().isDirectory()) { + throw new IOException("Attempt to set an erasure coding policy " + + "for a file " + src); + } + if (getErasureCodingPolicyForPath(fsn, srcIIP) != null) { + throw new IOException("Directory " + src + " already has an " + + "erasure coding policy."); + } + + // The system default erasure coding policy will be used since none is specified. + if (ecPolicy == null) { + ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy(); + } + + final XAttr ecXAttr; + DataOutputStream dOut = null; + try { + ByteArrayOutputStream bOut = new ByteArrayOutputStream(); + dOut = new DataOutputStream(bOut); + WritableUtils.writeString(dOut, ecPolicy.getName()); + ecXAttr = XAttrHelper.buildXAttr(XATTR_ERASURECODING_POLICY, + bOut.toByteArray()); + } finally { + IOUtils.closeStream(dOut); + } + final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1); + xattrs.add(ecXAttr); + FSDirXAttrOp.unprotectedSetXAttrs(fsd, src, xattrs, + EnumSet.of(XAttrSetFlag.CREATE)); + return xattrs; + } + /** - * Get the erasure coding zone information for specified path. + * Get the erasure coding policy information for the specified path. * * @param fsn namespace * @param src path - * @return {@link ErasureCodingZone} + * @return {@link ErasureCodingPolicy} * @throws IOException */ - static ErasureCodingZone getErasureCodingZone(final FSNamesystem fsn, + static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn, final String src) throws IOException { assert fsn.hasReadLock(); final INodesInPath iip = getINodesInPath(fsn, src); - return getErasureCodingZoneForPath(fsn, iip); + return getErasureCodingPolicyForPath(fsn, iip); } /** - * Get erasure coding zone information for specified path. - * - * @param fsn namespace - * @param iip inodes in the path containing the file - * @return {@link ErasureCodingZone} - * @throws IOException - */ - static ErasureCodingZone getErasureCodingZone(final FSNamesystem fsn, - final INodesInPath iip) throws IOException { - assert fsn.hasReadLock(); - - return getErasureCodingZoneForPath(fsn, iip); - } - - /** - * Check if the file is in erasure coding zone. + * Check if the file or directory has an erasure coding policy.
* * @param fsn namespace * @param srcArg path - * @return true represents the file is in erasure coding zone, false otw + * @return Whether the file or directory has an erasure coding policy. * @throws IOException */ - static boolean isInErasureCodingZone(final FSNamesystem fsn, + static boolean hasErasureCodingPolicy(final FSNamesystem fsn, final String srcArg) throws IOException { - assert fsn.hasReadLock(); - - final INodesInPath iip = getINodesInPath(fsn, srcArg); - return getErasureCodingPolicyForPath(fsn, iip) != null; + return hasErasureCodingPolicy(fsn, getINodesInPath(fsn, srcArg)); } /** - * Check if the file is in erasure coding zone. + * Check if the file or directory has an erasure coding policy. * * @param fsn namespace * @param iip inodes in the path containing the file - * @return true represents the file is in erasure coding zone, false otw + * @return Whether the file or directory has an erasure coding policy. * @throws IOException */ - static boolean isInErasureCodingZone(final FSNamesystem fsn, + static boolean hasErasureCodingPolicy(final FSNamesystem fsn, final INodesInPath iip) throws IOException { return getErasureCodingPolicy(fsn, iip) != null; } @@ -178,25 +210,46 @@ final class FSDirErasureCodingOp { return iip; } - private static ErasureCodingZone getErasureCodingZoneForPath( - final FSNamesystem fsn, final INodesInPath iip) throws IOException { - final FSDirectory fsd = fsn.getFSDirectory(); + private static ErasureCodingPolicy getErasureCodingPolicyForPath(FSNamesystem fsn, + INodesInPath iip) throws IOException { + Preconditions.checkNotNull(iip, "INodes cannot be null"); + FSDirectory fsd = fsn.getFSDirectory(); fsd.readLock(); try { - return fsn.getErasureCodingZoneManager().getErasureCodingZone(iip); - } finally { - fsd.readUnlock(); - } - } - - private static ErasureCodingPolicy getErasureCodingPolicyForPath(final FSNamesystem fsn, - final INodesInPath iip) throws IOException { - final FSDirectory fsd = fsn.getFSDirectory(); - fsd.readLock(); - try { - return fsn.getErasureCodingZoneManager().getErasureCodingPolicy(iip); + List<INode> inodes = iip.getReadOnlyINodes(); + for (int i = inodes.size() - 1; i >= 0; i--) { + final INode inode = inodes.get(i); + if (inode == null) { + continue; + } + /** + * TODO: lookup {@link ErasureCodingPolicyManager#getSystemPolices()} + */ + if (inode.isFile()) { + return inode.asFile().getErasureCodingPolicyID() == 0 ? + null : ErasureCodingPolicyManager.getSystemDefaultPolicy(); + } + // We don't allow setting EC policies on paths with a symlink. Thus + // if a symlink is encountered, the dir shouldn't have EC policy. + // TODO: properly support symlinks + if (inode.isSymlink()) { + return null; + } + final XAttrFeature xaf = inode.getXAttrFeature(); + if (xaf != null) { + XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY); + if (xattr != null) { + ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue()); + DataInputStream dIn = new DataInputStream(bIn); + String ecPolicyName = WritableUtils.readString(dIn); + return fsd.getFSNamesystem().getErasureCodingPolicyManager().
+ getPolicy(ecPolicyName); + } + } + } } finally { fsd.readUnlock(); } + return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java index ccdef1f3e26..b01570848a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java @@ -185,7 +185,6 @@ class FSDirRenameOp { } fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src); - fsd.ecZoneManager.checkMoveValidity(srcIIP, dstIIP, src); // Ensure dst has quota to accommodate rename verifyFsLimitsForRename(fsd, srcIIP, dstIIP); verifyQuotaForRename(fsd, srcIIP, dstIIP); @@ -358,7 +357,6 @@ class FSDirRenameOp { BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite(); fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src); - fsd.ecZoneManager.checkMoveValidity(srcIIP, dstIIP, src); final INode dstInode = dstIIP.getLastINode(); List snapshottableDirs = new ArrayList<>(); if (dstInode != null) { // Destination exists diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 6ec97c90a96..e178c685d25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -182,12 +181,12 @@ class FSDirStatAndListingOp { final FileEncryptionInfo feInfo = isReservedName ? null : fsd.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip); - final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( - fsd.getFSNamesystem(), iip); + final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp. + getErasureCodingPolicy(fsd.getFSNamesystem(), iip); final LocatedBlocks blocks = bm.createLocatedBlocks( inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, - length, needBlockToken, iip.isSnapshot(), feInfo, ecZone); + length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy); // Set caching information for the located blocks. for (LocatedBlock lb : blocks.getLocatedBlocks()) { @@ -447,10 +446,8 @@ class FSDirStatAndListingOp { final FileEncryptionInfo feInfo = isRawPath ? null : fsd.getFileEncryptionInfo(node, snapshot, iip); - final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsd.getFSNamesystem(), iip); - final ErasureCodingPolicy ecPolicy = - ecZone != null ? 
ecZone.getErasureCodingPolicy() : null; if (node.isFile()) { final INodeFile fileNode = node.asFile(); @@ -505,7 +502,7 @@ class FSDirStatAndListingOp { final boolean isEncrypted; final FileEncryptionInfo feInfo = isRawPath ? null : fsd.getFileEncryptionInfo(node, snapshot, iip); - final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsd.getFSNamesystem(), iip); if (node.isFile()) { final INodeFile fileNode = node.asFile(); @@ -520,7 +517,7 @@ class FSDirStatAndListingOp { loc = fsd.getBlockManager().createLocatedBlocks( fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false, - inSnapshot, feInfo, ecZone); + inSnapshot, feInfo, ecPolicy); if (loc == null) { loc = new LocatedBlocks(); } @@ -531,8 +528,6 @@ class FSDirStatAndListingOp { } int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - final ErasureCodingPolicy ecPolicy = - ecZone != null ? ecZone.getErasureCodingPolicy() : null; HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.isDirectory(), replication, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index e480959d037..867b4515469 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.EncryptionZone; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -486,8 +485,8 @@ class FSDirWriteFileOp { Preconditions.checkNotNull(existing); assert fsd.hasWriteLock(); try { - // check if the file is in an EC zone - final boolean isStriped = FSDirErasureCodingOp.isInErasureCodingZone( + // check if the file has an EC policy + final boolean isStriped = FSDirErasureCodingOp.hasErasureCodingPolicy( fsd.getFSNamesystem(), existing); if (underConstruction) { newNode = newINodeFile(id, permissions, modificationTime, @@ -533,9 +532,8 @@ class FSDirWriteFileOp { // associate new last block for the file final BlockInfo blockInfo; if (isStriped) { - ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsd.getFSNamesystem(), inodesInPath); - ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy(); short numDataUnits = (short) ecPolicy.getNumDataUnits(); short numParityUnits = (short) ecPolicy.getNumParityUnits(); short numLocations = (short) (numDataUnits + numParityUnits); @@ -586,7 +584,7 @@ class FSDirWriteFileOp { INodesInPath newiip; fsd.writeLock(); try { - final boolean isStriped = FSDirErasureCodingOp.isInErasureCodingZone( + final boolean isStriped = FSDirErasureCodingOp.hasErasureCodingPolicy( fsd.getFSNamesystem(), existing); INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions, modTime, modTime, replication, preferredBlockSize, isStriped); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index c9fb6cd9226..6538aba77f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -215,9 +215,6 @@ public class FSDirectory implements Closeable { @VisibleForTesting public final EncryptionZoneManager ezManager; - @VisibleForTesting - public final ErasureCodingZoneManager ecZoneManager; - /** * Caches frequently used file names used in {@link INode} to reuse * byte[] objects and reduce heap usage. @@ -314,7 +311,6 @@ public class FSDirectory implements Closeable { namesystem = ns; this.editLog = ns.getEditLog(); ezManager = new EncryptionZoneManager(this, conf); - ecZoneManager = new ErasureCodingZoneManager(this); } FSNamesystem getFSNamesystem() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 68324f3f358..169154885c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -36,7 +36,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.XAttrSetFlag; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; @@ -416,9 +416,9 @@ public class FSEditLogLoader { // Update the salient file attributes. newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID); - ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsDir.getFSNamesystem(), iip); - updateBlocks(fsDir, addCloseOp, iip, newFile, ecZone); + updateBlocks(fsDir, addCloseOp, iip, newFile, ecPolicy); break; } case OP_CLOSE: { @@ -438,9 +438,9 @@ public class FSEditLogLoader { // Update the salient file attributes. 
file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID); - ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsDir.getFSNamesystem(), iip); - updateBlocks(fsDir, addCloseOp, iip, file, ecZone); + updateBlocks(fsDir, addCloseOp, iip, file, ecPolicy); // Now close the file if (!file.isUnderConstruction() && @@ -498,9 +498,9 @@ public class FSEditLogLoader { INodesInPath iip = fsDir.getINodesInPath(path, true); INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path); // Update in-memory data structures - ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsDir.getFSNamesystem(), iip); - updateBlocks(fsDir, updateOp, iip, oldFile, ecZone); + updateBlocks(fsDir, updateOp, iip, oldFile, ecPolicy); if (toAddRetryCache) { fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId); @@ -517,9 +517,9 @@ public class FSEditLogLoader { INodesInPath iip = fsDir.getINodesInPath(path, true); INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path); // add the new block to the INodeFile - ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone( + ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy( fsDir.getFSNamesystem(), iip); - addNewBlock(addBlockOp, oldFile, ecZone); + addNewBlock(addBlockOp, oldFile, ecPolicy); break; } case OP_SET_REPLICATION: { @@ -961,7 +961,7 @@ public class FSEditLogLoader { * Add a new block into the given INodeFile */ private void addNewBlock(AddBlockOp op, INodeFile file, - ErasureCodingZone ecZone) throws IOException { + ErasureCodingPolicy ecPolicy) throws IOException { BlockInfo[] oldBlocks = file.getBlocks(); Block pBlock = op.getPenultimateBlock(); Block newBlock= op.getLastBlock(); @@ -988,10 +988,9 @@ public class FSEditLogLoader { } // add the new block final BlockInfo newBlockInfo; - boolean isStriped = ecZone != null; + boolean isStriped = ecPolicy != null; if (isStriped) { - newBlockInfo = new BlockInfoStriped(newBlock, - ecZone.getErasureCodingPolicy()); + newBlockInfo = new BlockInfoStriped(newBlock, ecPolicy); } else { newBlockInfo = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication()); @@ -1008,7 +1007,7 @@ public class FSEditLogLoader { * @throws IOException */ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, - INodesInPath iip, INodeFile file, ErasureCodingZone ecZone) + INodesInPath iip, INodeFile file, ErasureCodingPolicy ecPolicy) throws IOException { // Update its block list BlockInfo[] oldBlocks = file.getBlocks(); @@ -1068,7 +1067,7 @@ public class FSEditLogLoader { throw new IOException("Trying to delete non-existant block " + oldBlock); } } else if (newBlocks.length > oldBlocks.length) { - final boolean isStriped = ecZone != null; + final boolean isStriped = ecPolicy != null; // We're adding blocks for (int i = oldBlocks.length; i < newBlocks.length; i++) { Block newBlock = newBlocks[i]; @@ -1078,8 +1077,7 @@ public class FSEditLogLoader { // what about an old-version fsync() where fsync isn't called // until several blocks in? 
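Throughout FSEditLogLoader, the striped-versus-contiguous decision now hinges on whether the resolved ErasureCodingPolicy is null, rather than on the presence of a zone object. A minimal sketch of the pattern that addNewBlock and updateBlocks share after this change (the helper below is hypothetical and not part of the patch; constructors are the ones used in these hunks):

```java
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

// Hypothetical helper: a null policy now stands in for "contiguous
// layout", where a null ErasureCodingZone used to.
static BlockInfo newBlockInfo(Block block, INodeFile file,
    ErasureCodingPolicy ecPolicy) {
  if (ecPolicy != null) {
    // Striped: the policy itself carries the schema (data/parity units),
    // so no extra lookup through a zone is needed.
    return new BlockInfoStriped(block, ecPolicy);
  }
  // Contiguous: replication still comes from the file header.
  return new BlockInfoContiguous(block,
      file.getPreferredBlockReplication());
}
```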
if (isStriped) { - newBI = new BlockInfoStriped(newBlock, - ecZone.getErasureCodingPolicy()); + newBI = new BlockInfoStriped(newBlock, ecPolicy); } else { newBI = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 5f394460ecb..ed52ca447b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -177,7 +177,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.EncryptionZone; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -187,7 +186,6 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; -import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocol.RollingUpgradeException; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; @@ -2133,7 +2131,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, readLock(); try { checkOperation(OperationCategory.READ); - if (!FSDirErasureCodingOp.isInErasureCodingZone(this, src)) { + if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, src)) { blockManager.verifyReplication(src, replication, clientMachine); } } finally { @@ -3206,9 +3204,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, final long diff; final short replicationFactor; if (fileINode.isStriped()) { - final ErasureCodingZone ecZone = FSDirErasureCodingOp - .getErasureCodingZone(this, iip); - final ErasureCodingPolicy ecPolicy = ecZone.getErasureCodingPolicy(); + final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp + .getErasureCodingPolicy(this, iip); final short numDataUnits = (short) ecPolicy.getNumDataUnits(); final short numParityUnits = (short) ecPolicy.getNumParityUnits(); @@ -6241,11 +6238,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, return ecPolicyManager; } - /** @return the ErasureCodingZoneManager. */ - public ErasureCodingZoneManager getErasureCodingZoneManager() { - return dir.ecZoneManager; - } - @Override // NameNodeMXBean public String getCorruptFiles() { List list = new ArrayList(); @@ -7192,15 +7184,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } /** - * Create an erasure coding zone on directory src. - * @param srcArg the path of a directory which will be the root of the - * erasure coding zone. The directory must be empty. - * @param ecPolicy erasure coding policy for the erasure coding zone + * Set an erasure coding policy on the given path. + * @param srcArg The path of the target directory. + * @param ecPolicy The erasure coding policy to set on the target directory. 
* @throws AccessControlException if the caller is not the superuser. * @throws UnresolvedLinkException if the path can't be resolved. * @throws SafeModeException if the Namenode is in safe mode. */ - void createErasureCodingZone(final String srcArg, final ErasureCodingPolicy + void setErasureCodingPolicy(final String srcArg, final ErasureCodingPolicy ecPolicy, final boolean logRetryCache) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException { checkSuperuserPrivilege(); @@ -7210,8 +7201,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, writeLock(); try { checkOperation(OperationCategory.WRITE); - checkNameNodeSafeMode("Cannot create erasure coding zone on " + srcArg); - resultingStat = FSDirErasureCodingOp.createErasureCodingZone(this, + checkNameNodeSafeMode("Cannot set erasure coding policy on " + srcArg); + resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this, srcArg, ecPolicy, logRetryCache); success = true; } finally { @@ -7219,21 +7210,21 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, if (success) { getEditLog().logSync(); } - logAuditEvent(success, "createErasureCodingZone", srcArg, null, + logAuditEvent(success, "setErasureCodingPolicy", srcArg, null, resultingStat); } } /** - * Get the erasure coding zone information for specified path + * Get the erasure coding policy information for specified path */ - ErasureCodingZone getErasureCodingZone(String src) + ErasureCodingPolicy getErasureCodingPolicy(String src) throws AccessControlException, UnresolvedLinkException, IOException { checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); - return getErasureCodingZoneForPath(src); + return getErasureCodingPolicyForPath(src); } finally { readUnlock(); } @@ -7461,9 +7452,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override - public ErasureCodingZone getErasureCodingZoneForPath(String src) + public ErasureCodingPolicy getErasureCodingPolicyForPath(String src) throws IOException { - return FSDirErasureCodingOp.getErasureCodingZone(this, src); + return FSDirErasureCodingOp.getErasureCodingPolicy(this, src); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index ae9b0d2e7f3..6f7b702bb5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -437,6 +437,20 @@ public class INodeFile extends INodeWithAdditionalFields setStoragePolicyID(storagePolicyId); } + + /** + * @return The ID of the erasure coding policy on the file. 0 represents no + * EC policy (file is in contiguous format). 1 represents the system + * default EC policy: + * {@link ErasureCodingPolicyManager#SYS_DEFAULT_POLICY}. + * TODO: support more policies by reusing {@link HeaderFormat#REPLICATION}. + */ + @VisibleForTesting + @Override + public byte getErasureCodingPolicyID() { + return isStriped() ? (byte)1 : (byte)0; + } + /** * @return true if the file is in the striping layout. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java index 13bd9e962e3..8cd481a68e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; -import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; + /** * The attributes of a file. */ @@ -32,6 +32,9 @@ public interface INodeFileAttributes extends INodeAttributes { /** @return whether the file is striped (instead of contiguous) */ public boolean isStriped(); + /** @return the ID of the erasure coding policy on the file */ + public byte getErasureCodingPolicyID(); + /** @return preferred block size in bytes */ public long getPreferredBlockSize(); @@ -77,6 +80,11 @@ public interface INodeFileAttributes extends INodeAttributes { return HeaderFormat.isStriped(header); } + @Override + public byte getErasureCodingPolicyID() { + return isStriped() ? (byte)1 : (byte)0; + } + @Override public long getPreferredBlockSize() { return HeaderFormat.getPreferredBlockSize(header); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index ab29e4de634..41439641a81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -85,7 +85,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSLimitException; @@ -1423,8 +1422,8 @@ class NameNodeRpcServer implements NamenodeProtocols { @Override // RefreshAuthorizationPolicyProtocol public void refreshUserToGroupsMappings() throws IOException { - LOG.info("Refreshing all user-to-groups mappings. Requested by user: " + - getRemoteUser().getShortUserName()); + LOG.info("Refreshing all user-to-groups mappings. 
Requested by user: " + + getRemoteUser().getShortUserName()); Groups.getUserToGroupsMappingService().refresh(); } @@ -1557,7 +1556,7 @@ class NameNodeRpcServer implements NamenodeProtocols { } namesystem.checkOperation(OperationCategory.WRITE); CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, - null); + null); if (cacheEntry != null && cacheEntry.isSuccess()) { return (String) cacheEntry.getPayload(); } @@ -1849,7 +1848,7 @@ class NameNodeRpcServer implements NamenodeProtocols { } @Override // ClientProtocol - public void createErasureCodingZone(String src, ErasureCodingPolicy ecPolicy) + public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) throws IOException { checkNNStartup(); final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); @@ -1858,7 +1857,7 @@ class NameNodeRpcServer implements NamenodeProtocols { } boolean success = false; try { - namesystem.createErasureCodingZone(src, ecPolicy, cacheEntry != null); + namesystem.setErasureCodingPolicy(src, ecPolicy, cacheEntry != null); success = true; } finally { RetryCache.setState(cacheEntry, success); @@ -2068,8 +2067,8 @@ class NameNodeRpcServer implements NamenodeProtocols { } @Override // ClientProtocol - public ErasureCodingZone getErasureCodingZone(String src) throws IOException { + public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException { checkNNStartup(); - return namesystem.getErasureCodingZone(src); + return namesystem.getErasureCodingPolicy(src); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java index 923a33536c4..e1702d9e5c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; @@ -52,16 +52,16 @@ public interface Namesystem extends RwLock, SafeMode { void checkOperation(OperationCategory read) throws StandbyException; /** - * Gets the ECZone for path + * Gets the erasure coding policy for the path * @param src * - path - * @return {@link ErasureCodingZone} + * @return {@link ErasureCodingPolicy} * @throws IOException */ - ErasureCodingZone getErasureCodingZoneForPath(String src) + ErasureCodingPolicy getErasureCodingPolicyForPath(String src) throws IOException; boolean isInSnapshot(BlockInfo blockUC); CacheManager getCacheManager(); -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java index f3260dab48b..b857d3e6635 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java @@ 
-30,7 +30,6 @@ import org.apache.hadoop.fs.shell.Command; import org.apache.hadoop.fs.shell.CommandFactory; import org.apache.hadoop.fs.shell.PathData; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.util.StringUtils; @@ -45,9 +44,9 @@ public abstract class ECCommand extends Command { public static void registerCommands(CommandFactory factory) { // Register all commands of Erasure CLI, with a '-' at the beginning in name // of the command. - factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME); - factory.addClass(GetECZoneCommand.class, "-" - + GetECZoneCommand.NAME); + factory.addClass(SetECPolicyCommand.class, "-" + SetECPolicyCommand.NAME); + factory.addClass(GetECPolicyCommand.class, "-" + + GetECPolicyCommand.NAME); factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME); } @@ -76,17 +75,18 @@ public abstract class ECCommand extends Command { } /** - * A command to create an EC zone for a path, with a erasure coding policy name. + * A command to set the erasure coding policy for a directory, with the name + * of the policy. */ - static class CreateECZoneCommand extends ECCommand { - public static final String NAME = "createZone"; + static class SetECPolicyCommand extends ECCommand { + public static final String NAME = "setPolicy"; public static final String USAGE = "[-s ] "; public static final String DESCRIPTION = - "Create a zone to encode files using a specified policy\n" + "Set a specified erasure coding policy on a directory\n" + "Options :\n" + " -s : erasure coding policy name to encode files. " + "If not passed the default policy will be used\n" - + " : Path to an empty directory. Under this directory " + + " : Path to a directory. Under this directory " + "files will be encoded using specified erasure coding policy"; private String ecPolicyName; private ErasureCodingPolicy ecPolicy = null; @@ -129,23 +129,23 @@ public abstract class ECCommand extends Command { throw new HadoopIllegalArgumentException(sb.toString()); } } - dfs.createErasureCodingZone(item.path, ecPolicy); - out.println("EC Zone created successfully at " + item.path); + dfs.setErasureCodingPolicy(item.path, ecPolicy); + out.println("EC policy set successfully at " + item.path); } catch (IOException e) { - throw new IOException("Unable to create EC zone for the path " + throw new IOException("Unable to set EC policy for the path " + item.path + ". 
" + e.getMessage()); } } } /** - * Get the information about the zone + * Get the erasure coding policy of a file or directory */ - static class GetECZoneCommand extends ECCommand { - public static final String NAME = "getZone"; + static class GetECPolicyCommand extends ECCommand { + public static final String NAME = "getPolicy"; public static final String USAGE = ""; public static final String DESCRIPTION = - "Get information about the EC zone at specified path\n"; + "Get erasure coding policy information at specified path\n"; @Override protected void processOptions(LinkedList args) throws IOException { @@ -162,14 +162,14 @@ public abstract class ECCommand extends Command { super.processPath(item); DistributedFileSystem dfs = (DistributedFileSystem) item.fs; try { - ErasureCodingZone ecZone = dfs.getErasureCodingZone(item.path); - if (ecZone != null) { - out.println(ecZone.toString()); + ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(item.path); + if (ecPolicy != null) { + out.println(ecPolicy.toString()); } else { - out.println("Path " + item.path + " is not in EC zone"); + out.println("Path " + item.path + " is not erasure coded."); } } catch (IOException e) { - throw new IOException("Unable to get EC zone for the path " + throw new IOException("Unable to get EC policy for the path " + item.path + ". " + e.getMessage()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 6500b963871..3551055cdf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1892,12 +1892,12 @@ public class DFSTestUtil { public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir) throws Exception { DistributedFileSystem dfs = cluster.getFileSystem(); - // If outer test already created EC zone, dir should be left as null + // If outer test already set EC policy, dir should be left as null if (toMkdir) { assert dir != null; dfs.mkdirs(dir); try { - dfs.getClient().createErasureCodingZone(dir.toString(), null); + dfs.getClient().setErasureCodingPolicy(dir.toString(), null); } catch (IOException e) { if (!e.getMessage().contains("non-empty directory")) { throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java index a09f0f0ccfa..6fcf644dd8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java @@ -80,7 +80,7 @@ public class TestDFSStripedInputStream { } fs = cluster.getFileSystem(); fs.mkdirs(dirPath); - fs.getClient().createErasureCodingZone(dirPath.toString(), null); + fs.getClient().setErasureCodingPolicy(dirPath.toString(), null); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java index a467f40d8b6..4263ffaf289 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java @@ -68,7 +68,7 @@ public class TestDFSStripedOutputStream { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java index 2aa8484419b..afb7f95cbb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java @@ -118,7 +118,7 @@ public class TestDFSStripedOutputStreamWithFailure { cluster.waitActive(); dfs = cluster.getFileSystem(); dfs.mkdirs(dir); - dfs.createErasureCodingZone(dir, null); + dfs.setErasureCodingPolicy(dir, null); } private void tearDown() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java similarity index 67% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index b68aab99563..f60d77d8bd5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -35,7 +35,7 @@ import java.io.IOException; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.Assert.*; -public class TestErasureCodingZones { +public class TestErasureCodingPolicies { private Configuration conf; private MiniDFSCluster cluster; private DistributedFileSystem fs; @@ -59,52 +59,57 @@ public class TestErasureCodingZones { } @Test - public void testCreateECZone() + public void testBasicSetECPolicy() throws IOException, InterruptedException { final Path testDir = new Path("/ec"); fs.mkdir(testDir, FsPermission.getDirDefault()); - /* Normal creation of an erasure coding zone */ - fs.getClient().createErasureCodingZone(testDir.toString(), null); + /* Normal creation of an erasure coding directory */ + fs.getClient().setErasureCodingPolicy(testDir.toString(), null); - /* Verify files under the zone are striped */ + /* Verify files under the directory are striped */ final Path ECFilePath = new Path(testDir, "foo"); fs.create(ECFilePath); INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString()); assertTrue(inode.asFile().isStriped()); - /* Verify that EC zone cannot be created on non-empty dir */ + /** + * Verify that setting an EC policy on a non-empty directory only affects + * newly created files under the directory. 
+ */ final Path notEmpty = new Path("/nonEmpty"); fs.mkdir(notEmpty, FsPermission.getDirDefault()); - fs.create(new Path(notEmpty, "foo")); + final Path oldFile = new Path(notEmpty, "old"); + fs.create(oldFile); + fs.getClient().setErasureCodingPolicy(notEmpty.toString(), null); + final Path newFile = new Path(notEmpty, "new"); + fs.create(newFile); + INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString()); + assertFalse(oldInode.asFile().isStriped()); + INode newInode = namesystem.getFSDirectory().getINode(newFile.toString()); + assertTrue(newInode.asFile().isStriped()); + + /* Verify that nested EC policies are not supported */ + final Path dir1 = new Path("/dir1"); + final Path dir2 = new Path(dir1, "dir2"); + fs.mkdir(dir1, FsPermission.getDirDefault()); + fs.getClient().setErasureCodingPolicy(dir1.toString(), null); + fs.mkdir(dir2, FsPermission.getDirDefault()); try { - fs.getClient().createErasureCodingZone(notEmpty.toString(), null); - fail("Erasure coding zone on non-empty dir"); + fs.getClient().setErasureCodingPolicy(dir2.toString(), null); + fail("Nested erasure coding policies"); } catch (IOException e) { - assertExceptionContains("erasure coding zone for a non-empty directory", e); + assertExceptionContains("already has an erasure coding policy", e); } - /* Verify that nested EC zones cannot be created */ - final Path zone1 = new Path("/zone1"); - final Path zone2 = new Path(zone1, "zone2"); - fs.mkdir(zone1, FsPermission.getDirDefault()); - fs.getClient().createErasureCodingZone(zone1.toString(), null); - fs.mkdir(zone2, FsPermission.getDirDefault()); - try { - fs.getClient().createErasureCodingZone(zone2.toString(), null); - fail("Nested erasure coding zones"); - } catch (IOException e) { - assertExceptionContains("already in an erasure coding zone", e); - } - - /* Verify that EC zone cannot be created on a file */ + /* Verify that EC policy cannot be set on a file */ final Path fPath = new Path("/file"); fs.create(fPath); try { - fs.getClient().createErasureCodingZone(fPath.toString(), null); - fail("Erasure coding zone on file"); + fs.getClient().setErasureCodingPolicy(fPath.toString(), null); + fail("Erasure coding policy on file"); } catch (IOException e) { - assertExceptionContains("erasure coding zone for a file", e); + assertExceptionContains("erasure coding policy for a file", e); } } @@ -114,8 +119,8 @@ public class TestErasureCodingZones { final Path dstECDir = new Path("/dstEC"); fs.mkdir(srcECDir, FsPermission.getDirDefault()); fs.mkdir(dstECDir, FsPermission.getDirDefault()); - fs.getClient().createErasureCodingZone(srcECDir.toString(), null); - fs.getClient().createErasureCodingZone(dstECDir.toString(), null); + fs.getClient().setErasureCodingPolicy(srcECDir.toString(), null); + fs.getClient().setErasureCodingPolicy(dstECDir.toString(), null); final Path srcFile = new Path(srcECDir, "foo"); fs.create(srcFile); @@ -130,37 +135,26 @@ public class TestErasureCodingZones { fs.rename(new Path("/dstEC/srcEC"), srcECDir); // move back // Test move file - /* Verify that a file can be moved between 2 EC zones */ + /* Verify that a file can be moved between 2 EC dirs */ fs.rename(srcFile, dstECDir); fs.rename(new Path(dstECDir, "foo"), srcECDir); // move back - /* Verify that a file cannot be moved from a non-EC dir to an EC zone */ + /* Verify that a file can be moved from a non-EC dir to an EC dir */ final Path nonECDir = new Path("/nonEC"); fs.mkdir(nonECDir, FsPermission.getDirDefault()); - try { - fs.rename(srcFile, nonECDir); - fail("A file shouldn't 
be able to move from a non-EC dir to an EC zone"); - } catch (IOException e) { - assertExceptionContains("can't be moved because the source and " + - "destination have different erasure coding policies", e); - } + fs.rename(srcFile, nonECDir); - /* Verify that a file cannot be moved from an EC zone to a non-EC dir */ + /* Verify that a file can be moved from an EC dir to a non-EC dir */ final Path nonECFile = new Path(nonECDir, "nonECFile"); fs.create(nonECFile); - try { - fs.rename(nonECFile, dstECDir); - } catch (IOException e) { - assertExceptionContains("can't be moved because the source and " + - "destination have different erasure coding policies", e); - } + fs.rename(nonECFile, dstECDir); } @Test public void testReplication() throws IOException { final Path testDir = new Path("/ec"); fs.mkdir(testDir, FsPermission.getDirDefault()); - fs.createErasureCodingZone(testDir, null); + fs.setErasureCodingPolicy(testDir, null); final Path fooFile = new Path(testDir, "foo"); // create ec file with replication=0 fs.create(fooFile, FsPermission.getFileDefault(), true, @@ -171,23 +165,23 @@ public class TestErasureCodingZones { } @Test - public void testGetErasureCodingInfoWithSystemDefaultECPolicy() throws Exception { + public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Exception { String src = "/ec"; final Path ecDir = new Path(src); fs.mkdir(ecDir, FsPermission.getDirDefault()); - // dir ECInfo before creating ec zone + // dir EC policy should be null assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); - // dir ECInfo after creating ec zone - fs.getClient().createErasureCodingZone(src, null); //Default one will be used. + // dir EC policy after setting + fs.getClient().setErasureCodingPolicy(src, null); //Default one will be used. 
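The renamed client surface that these tests exercise can be shown end to end in a few lines. A hedged sketch (the helper and path names are invented for illustration; it assumes a DistributedFileSystem backed by a running cluster, as in the tests above):

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Hypothetical helper, not part of the patch.
static void demoPolicyRoundTrip(DistributedFileSystem dfs) throws Exception {
  Path dir = new Path("/ecDemo");
  dfs.mkdirs(dir);
  // null selects the system default policy, as in the tests above
  // (was: dfs.createErasureCodingZone(dir, null)).
  dfs.setErasureCodingPolicy(dir, null);
  dfs.create(new Path(dir, "file")).close();
  // Files created after the call inherit the directory's policy
  // (was: dfs.getErasureCodingZone(...)).
  ErasureCodingPolicy policy =
      dfs.getErasureCodingPolicy(new Path(dir, "file"));
  System.out.println("Effective policy: " + policy);
}
```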
ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy(); verifyErasureCodingInfo(src, sysDefaultECPolicy); fs.create(new Path(ecDir, "child1")).close(); - // verify for the files in ec zone + // verify for the files in ec dir verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy); } @Test - public void testGetErasureCodingInfo() throws Exception { + public void testGetErasureCodingPolicy() throws Exception { ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices(); assertTrue("System ecPolicies should be of only 1 for now", sysECPolicies.length == 1); @@ -196,13 +190,13 @@ public class TestErasureCodingZones { String src = "/ec2"; final Path ecDir = new Path(src); fs.mkdir(ecDir, FsPermission.getDirDefault()); - // dir ECInfo before creating ec zone + // dir ECInfo before being set assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); - // dir ECInfo after creating ec zone - fs.getClient().createErasureCodingZone(src, usingECPolicy); + // dir ECInfo after set + fs.getClient().setErasureCodingPolicy(src, usingECPolicy); verifyErasureCodingInfo(src, usingECPolicy); fs.create(new Path(ecDir, "child1")).close(); - // verify for the files in ec zone + // verify for the files in ec dir verifyErasureCodingInfo(src + "/child1", usingECPolicy); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java index 4610ced5460..64063d20eb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java @@ -38,24 +38,24 @@ public class TestFileStatusWithECPolicy { @Test public void testFileStatusWithECPolicy() throws Exception { - // test directory not in EC zone + // test directory doesn't have an EC policy final Path dir = new Path("/foo"); assertTrue(fs.mkdir(dir, FsPermission.getDirDefault())); assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy()); - // test file not in EC zone + // test file doesn't have an EC policy final Path file = new Path(dir, "foo"); fs.create(file).close(); assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy()); fs.delete(file, true); final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy(); - // create EC zone on dir - fs.createErasureCodingZone(dir, ecPolicy1); + // set EC policy on dir + fs.setErasureCodingPolicy(dir, ecPolicy1); final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy(); assertNotNull(ecPolicy2); assertTrue(ecPolicy1.equals(ecPolicy2)); - // test file in EC zone + // test file doesn't have an EC policy fs.create(file).close(); final ErasureCodingPolicy ecPolicy3 = fs.getClient().getFileInfo(file.toUri().getPath()).getErasureCodingPolicy(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java index 9048fbdfddb..68cd25edeac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java @@ -68,7 +68,7 @@ public class 
TestReadStripedFileWithDecoding { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) .numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java index 08a5f585910..ca376afa4a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java @@ -53,7 +53,7 @@ public class TestReadStripedFileWithMissingBlocks { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java index 7a0851f4620..b58184552ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java @@ -78,7 +78,7 @@ public class TestRecoverStripedFile { cluster.waitActive(); fs = cluster.getFileSystem(); - fs.getClient().createErasureCodingZone("/", null); + fs.getClient().setErasureCodingPolicy("/", null); List datanodes = cluster.getDataNodes(); for (int i = 0; i < dnNum; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index f577ddba280..318eb9fee76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -54,7 +54,7 @@ public class TestSafeModeWithStripedFile { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); cluster.waitActive(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java index b383c85889c..5381fcabeab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java @@ -57,7 +57,7 @@ public class TestWriteReadStripedFile { public void setup() throws IOException { 
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java index deffbcc6a76..50f98a3d1f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize; -import static org.apache.hadoop.hdfs.StripedFileTestUtil.cellSize; import static org.apache.hadoop.hdfs.StripedFileTestUtil.dataBlocks; import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs; import static org.apache.hadoop.hdfs.StripedFileTestUtil.parityBlocks; @@ -48,7 +47,7 @@ public class TestWriteStripedFileWithFailure { public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } @@ -158,4 +157,4 @@ public class TestWriteStripedFileWithFailure { throw new IOException("Failed at i=" + i, e); } } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index eb24fb0f9c6..21e60c8f62b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -82,7 +82,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; @@ -1702,7 +1701,7 @@ public class TestBalancer { cluster.waitActive(); client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); - client.createErasureCodingZone("/", null); + client.setErasureCodingPolicy("/", null); long totalCapacity = sum(capacities); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 2202b342dcd..9dc537c4b69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -56,7 +56,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS { conf = getConf(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient() - .createErasureCodingZone("/", null); + .setErasureCodingPolicy("/", null); try { cluster.waitActive(); doTestRead(conf, cluster, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java index 2e084fca0b1..9f4c47df858 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java @@ -71,7 +71,7 @@ public class TestSequentialBlockGroupId { private MiniDFSCluster cluster; private FileSystem fs; private SequentialBlockGroupIdGenerator blockGrpIdGenerator; - private Path eczone = new Path("/eczone"); + private Path ecDir = new Path("/ecDir"); @Before public void setup() throws Exception { @@ -84,9 +84,9 @@ public class TestSequentialBlockGroupId { fs = cluster.getFileSystem(); blockGrpIdGenerator = cluster.getNamesystem().getBlockIdManager() .getBlockGroupIdGenerator(); - fs.mkdirs(eczone); + fs.mkdirs(ecDir); cluster.getFileSystem().getClient() - .createErasureCodingZone("/eczone", null); + .setErasureCodingPolicy("/ecDir", null); } @After @@ -104,7 +104,7 @@ public class TestSequentialBlockGroupId { long blockGroupIdInitialValue = blockGrpIdGenerator.getCurrentValue(); // Create a file that is 4 blocks long. - Path path = new Path(eczone, "testBlockGrpIdGeneration.dat"); + Path path = new Path(ecDir, "testBlockGrpIdGeneration.dat"); DFSTestUtil.createFile(fs, path, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks = DFSTestUtil.getAllBlocks(fs, path); @@ -134,7 +134,7 @@ public class TestSequentialBlockGroupId { // Create a file with a few blocks to rev up the global block ID // counter. - Path path1 = new Path(eczone, "testBlockGrpIdCollisionDetection_file1.dat"); + Path path1 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file1.dat"); DFSTestUtil.createFile(fs, path1, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks1 = DFSTestUtil.getAllBlocks(fs, path1); @@ -145,7 +145,7 @@ public class TestSequentialBlockGroupId { blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue); // Trigger collisions by creating a new file. - Path path2 = new Path(eczone, "testBlockGrpIdCollisionDetection_file2.dat"); + Path path2 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file2.dat"); DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks2 = DFSTestUtil.getAllBlocks(fs, path2); @@ -204,7 +204,7 @@ public class TestSequentialBlockGroupId { // Reset back to the initial value to trigger collision blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue); // Trigger collisions by creating a new file. 
- Path path2 = new Path(eczone, "testCollisionWithLegacyBlock_file2.dat"); + Path path2 = new Path(ecDir, "testCollisionWithLegacyBlock_file2.dat"); DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks2 = DFSTestUtil.getAllBlocks(fs, path2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 2598fa832b2..7794f04ecd5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -470,8 +470,8 @@ public class TestMover { client.mkdirs(barDir, new FsPermission((short) 777), true); client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME); - // set "/bar" directory with EC zone. - client.createErasureCodingZone(barDir, null); + // set an EC policy on "/bar" directory + client.setErasureCodingPolicy(barDir, null); // write file to barDir final String fooFile = "/bar/foo"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java index 7d06a9b9c68..ae2cbab1688 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.junit.After; @@ -39,7 +38,6 @@ import org.junit.Test; import java.io.IOException; import java.util.Arrays; -import java.util.HashSet; import java.util.List; import static org.junit.Assert.assertEquals; @@ -70,7 +68,7 @@ public class TestAddOverReplicatedStripedBlocks { cluster.waitActive(); fs = cluster.getFileSystem(); fs.mkdirs(dirPath); - fs.getClient().createErasureCodingZone(dirPath.toString(), null); + fs.getClient().setErasureCodingPolicy(dirPath.toString(), null); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index c472cd800d8..4351fb9f3ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -74,7 +74,7 @@ public class TestAddStripedBlocks { .numDataNodes(GROUP_SIZE).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); - dfs.getClient().createErasureCodingZone("/", null); + dfs.getClient().setErasureCodingPolicy("/", null); } @After diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 694411fca10..40572f374b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -452,7 +452,7 @@ public class TestFSEditLogLoader { //set the storage policy of the directory fs.mkdir(new Path(testDir), new FsPermission("755")); - fs.getClient().getNamenode().createErasureCodingZone(testDir, null); + fs.getClient().getNamenode().setErasureCodingPolicy(testDir, null); // Create a file with striped block Path p = new Path(testFilePath); @@ -524,7 +524,7 @@ public class TestFSEditLogLoader { //set the storage policy of the directory fs.mkdir(new Path(testDir), new FsPermission("755")); - fs.getClient().getNamenode().createErasureCodingZone(testDir, null); + fs.getClient().getNamenode().setErasureCodingPolicy(testDir, null); //create a file with striped blocks Path p = new Path(testFilePath); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index d3689f3d9a7..7565e86d3d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -141,7 +141,7 @@ public class TestFSImage { private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf, boolean isUC) throws IOException{ // contruct a INode with StripedBlock for saving and loading - fsn.createErasureCodingZone("/", null, false); + fsn.setErasureCodingPolicy("/", null, false); long id = 123456789; byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes(); PermissionStatus permissionStatus = new PermissionStatus("testuser_a", @@ -425,7 +425,7 @@ public class TestFSImage { .build(); cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); - fs.getClient().getNamenode().createErasureCodingZone("/", null); + fs.getClient().getNamenode().setErasureCodingPolicy("/", null); Path file = new Path("/striped"); FSDataOutputStream out = fs.create(file); byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java index de299970ecc..efa5027fc61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import org.junit.After; import org.junit.Assert; @@ -66,7 +65,7 @@ public class TestQuotaWithStripedBlocks { dfs = 
cluster.getFileSystem(); dfs.mkdirs(ecDir); - dfs.getClient().createErasureCodingZone(ecDir.toString(), ecPolicy); + dfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy); dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA); dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA); dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index 48b22c064d7..2a593d5b0eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -217,8 +217,8 @@ public class TestStripedINodeFile { try { final int len = 1024; final Path parentDir = new Path("/parentDir"); - final Path zone = new Path(parentDir, "zone"); - final Path zoneFile = new Path(zone, "zoneFile"); + final Path ecDir = new Path(parentDir, "ecDir"); + final Path ecFile = new Path(ecDir, "ecFile"); final Path contiguousFile = new Path(parentDir, "someFile"); final DistributedFileSystem dfs; final Configuration conf = new Configuration(); @@ -232,18 +232,18 @@ public class TestStripedINodeFile { FSNamesystem fsn = cluster.getNamesystem(); dfs = cluster.getFileSystem(); - dfs.mkdirs(zone); + dfs.mkdirs(ecDir); - // create erasure zone - dfs.createErasureCodingZone(zone, null); - DFSTestUtil.createFile(dfs, zoneFile, len, (short) 1, 0xFEED); + // set erasure coding policy + dfs.setErasureCodingPolicy(ecDir, null); + DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED); DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED); final FSDirectory fsd = fsn.getFSDirectory(); // Case-1: Verify the behavior of striped blocks // Get blocks of striped file - INode inodeStriped = fsd.getINode("/parentDir/zone/zoneFile"); - assertTrue("Failed to get INodeFile for /parentDir/zone/zoneFile", + INode inodeStriped = fsd.getINode("/parentDir/ecDir/ecFile"); + assertTrue("Failed to get INodeFile for /parentDir/ecDir/ecFile", inodeStriped instanceof INodeFile); INodeFile inodeStripedFile = (INodeFile) inodeStriped; BlockInfo[] stripedBlks = inodeStripedFile.getBlocks(); @@ -252,8 +252,8 @@ public class TestStripedINodeFile { blockInfo.isDeleted()); } - // delete erasure zone directory - dfs.delete(zone, true); + // delete directory with erasure coding policy + dfs.delete(ecDir, true); for (BlockInfo blockInfo : stripedBlks) { assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java index 7bfaab67228..1e2326a5873 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java @@ -60,7 +60,7 @@ public class TestOfflineImageViewerWithStripedBlocks { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); Path eczone = new Path("/eczone"); fs.mkdirs(eczone); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml index 5e6065843f8..3a10a506e75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml @@ -48,39 +48,39 @@ - help: createZone command + help: setPolicy command - -fs NAMENODE -help createZone + -fs NAMENODE -help setPolicy RegexpComparator - ^[ \t]*Create a zone to encode files using a specified policy( )* + ^[ \t]*Set a specified erasure coding policy to a directory( )* RegexpComparator - ^-createZone \[-s <policyName>\] <path>(.)* + ^-setPolicy \[-s <policyName>\] <path>(.)* - help: getZone command + help: getPolicy command - -fs NAMENODE -help getZone + -fs NAMENODE -help getPolicy SubstringComparator - Get information about the EC zone at specified path + Get erasure coding policy information about at specified path RegexpComparator - ^-getZone <path>(.)* + ^-getPolicy <path>(.)* @@ -106,63 +106,63 @@ - createZone : create a zone to encode files + setPolicy : set erasure coding policy on a directory to encode files - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s RS-6-3-64k /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - EC Zone created successfully at NAMENODE/eczone + EC policy set successfully at NAMENODE/ecdir - createZone : create a zone twice + setPolicy : set a policy twice - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone /eczone - -fs NAMENODE -createZone /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -setPolicy /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - Directory /eczone is already in an erasure coding zone + Directory /ecdir already has an erasure coding policy - createZone : default policy + setPolicy : default policy - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone /eczone - -fs NAMENODE -getZone /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -getPolicy /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k + ErasureCodingPolicy=[Name=RS-6-3-64k - getZone : get information about the EC zone at specified path not in zone + getPolicy : get EC policy information at specified path, which doesn't have an EC policy -fs NAMENODE -mkdir /noec - -fs NAMENODE -getZone /noec + -fs NAMENODE -getPolicy /noec -fs NAMENODE -rmdir /noec @@ -170,45 +170,45 @@ SubstringComparator - Path NAMENODE/noec is not in EC zone + Path NAMENODE/noec is not erasure coded - getZone : get information about the EC zone at specified path + getPolicy : get EC policy information at specified path, which doesn't have an EC policy - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s RS-6-3-64k /eczone - -fs NAMENODE -getZone /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir + -fs NAMENODE -getPolicy /ecdir - -fs NAMENODE -rmdir 
-        <command>-fs NAMENODE -rmdir /eczone</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
+          <expected-output>ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>getZone : get EC zone at specified file path</description>
+      <description>getPolicy : get EC policy information at specified file path, which has an EC policy</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s RS-6-3-64k /eczone</ec-admin-command>
-        <command>-fs NAMENODE -touchz /eczone/ecfile</command>
-        <ec-admin-command>-fs NAMENODE -getZone /eczone/ecfile</ec-admin-command>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir</ec-admin-command>
+        <command>-fs NAMENODE -touchz /ecdir/ecfile</command>
+        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir/ecfile</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm /eczone/ecfile</command>
-        <command>-fs NAMENODE -rmdir /eczone</command>
+        <command>-fs NAMENODE -rm /ecdir/ecfile</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
+          <expected-output>ErasureCodingPolicy=[Name=RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -230,64 +230,64 @@
     <test>
-      <description>createZone : illegal parameters - path is missing</description>
+      <description>setPolicy : illegal parameters - path is missing</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone</ec-admin-command>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rmdir /eczone</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-createZone: &lt;path&gt; is missing(.)*</expected-output>
+          <expected-output>^-setPolicy: &lt;path&gt; is missing(.)*</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>createZone : illegal parameters - policy name is missing</description>
+      <description>setPolicy : illegal parameters - policy name is missing</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s</ec-admin-command>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -s</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rmdir /eczone</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-createZone: option -s requires 1 argument(.)*</expected-output>
+          <expected-output>^-setPolicy: option -s requires 1 argument(.)*</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>createZone : illegal parameters - too many arguments</description>
+      <description>setPolicy : illegal parameters - too many arguments</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone /eczone1 /eczone2</ec-admin-command>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir1 /ecdir2</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rmdir /eczone</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-createZone: Too many arguments</expected-output>
+          <expected-output>-setPolicy: Too many arguments</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>createZone : illegal parameters - invalidpolicy</description>
+      <description>setPolicy : illegal parameters - invalidpolicy</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir /eczone</command>
-        <ec-admin-command>-fs NAMENODE -createZone -s invalidpolicy /eczone</ec-admin-command>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -s invalidpolicy /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rmdir /eczone</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
@@ -298,62 +298,62 @@
     <test>
-      <description>createZone : illegal parameters - no such file</description>
+      <description>setPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -createZone /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^createZone: `/eczone': No such file or directory(.)*</expected-output>
+          <expected-output>^setPolicy: `/ecdir': No such file or directory(.)*</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>getZone : illegal parameters - path is missing</description>
+      <description>getPolicy : illegal parameters - path is missing</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getZone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-getZone: &lt;path&gt; is missing(.)*</expected-output>
+          <expected-output>^-getPolicy: &lt;path&gt; is missing(.)*</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>getZone : illegal parameters - too many arguments</description>
+      <description>getPolicy : illegal parameters - too many arguments</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getZone /eczone /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm /eczone</command>
+        <command>-fs NAMENODE -rm /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-getZone: Too many arguments</expected-output>
+          <expected-output>-getPolicy: Too many arguments</expected-output>
         </comparator>
       </comparators>
     </test>

     <test>
-      <description>getZone : illegal parameters - no such file</description>
+      <description>getPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getZone /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^getZone: `/eczone': No such file or directory(.)*</expected-output>
+          <expected-output>^getPolicy: `/ecdir': No such file or directory(.)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -361,7 +361,7 @@
      <description>listPolicies : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -listPolicies /eczone</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -listPolicies /ecdir</ec-admin-command>
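
Usage note (not part of the patch): the sketch below shows the renamed client API end to end, mirroring the updated tests. It assumes a test-scope MiniDFSCluster and a DistributedFileSystem#getErasureCodingPolicy(Path) wrapper for the ClientProtocol method of the same name; setErasureCodingPolicy(ecDir, null) is taken directly from TestStripedINodeFile above, where a null policy selects the default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class SetEcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The default RS-6-3 policy needs 9 datanodes to place a full stripe.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path ecDir = new Path("/ecdir");
      dfs.mkdirs(ecDir);
      // Null selects the default erasure coding policy; files created
      // under ecDir afterwards inherit it.
      dfs.setErasureCodingPolicy(ecDir, null);
      // Assumed wrapper around ClientProtocol#getErasureCodingPolicy.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(ecDir);
      System.out.println("EC policy on " + ecDir + ": " + policy);
    } finally {
      cluster.shutdown();
    }
  }
}

The testErasureCodingConf.xml cases above exercise the same flow through the shell via -setPolicy and -getPolicy.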