diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 0128b07bf91..7b6a4e57fed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2620,7 +2620,21 @@ public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) } catch (RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, SafeModeException.class, - UnresolvedPathException.class); + UnresolvedPathException.class, + FileNotFoundException.class); + } + } + + public void unsetErasureCodingPolicy(String src) throws IOException { + checkOpen(); + try (TraceScope ignored = + newPathTraceScope("unsetErasureCodingPolicy", src)) { + namenode.unsetErasureCodingPolicy(src); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + SafeModeException.class, + UnresolvedPathException.class, + FileNotFoundException.class); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 4f97896a199..e9475d867ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2467,6 +2467,35 @@ public Collection getAllErasureCodingPolicies() return Arrays.asList(dfs.getErasureCodingPolicies()); } + /** + * Unset the erasure coding policy from the source path. + * + * @param path The directory to unset the policy + * @throws IOException + */ + public void unsetErasureCodingPolicy(final Path path) throws IOException { + Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.unsetErasureCodingPolicy(getPathName(p)); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) throws IOException { + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem myDfs = (DistributedFileSystem) fs; + myDfs.unsetErasureCodingPolicy(p); + return null; + } + throw new UnsupportedOperationException( + "Cannot unsetErasureCodingPolicy through a symlink to a " + + "non-DistributedFileSystem: " + path + " -> " + p); + } + }.resolve(this, absF); + } + /** * Get the root directory of Trash for a path in HDFS. * 1. File in encryption zone returns /ez1/.Trash/username diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 550806441a2..a192fa87eb6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -496,6 +496,16 @@ public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException { return dfs.getClient().getErasureCodingPolicies(); } + /** + * Unset erasure coding policy from the directory. + * + * @param path The source path referring to a directory. 
+ * @throws IOException + */ + public void unsetErasureCodingPolicy(final Path path) throws IOException { + dfs.unsetErasureCodingPolicy(path); + } + private void provisionEZTrash(Path path) throws IOException { // make sure the path is an EZ EncryptionZone ez = dfs.getEZForPath(path); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 15bbe5161b1..407621bfa34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1534,6 +1534,13 @@ void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) @Idempotent ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException; + /** + * Unset erasure coding policy from a specified path. + * @param src The path to unset policy. + */ + @AtMostOnce + void unsetErasureCodingPolicy(String src) throws IOException; + /** * Get {@link QuotaUsage} rooted at the specified directory. * @param path The string representation of the path diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index f73abfdffa1..de474b59fb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -173,6 +173,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto; @@ -1474,6 +1475,20 @@ public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy) } } + @Override + public void unsetErasureCodingPolicy(String src) + throws IOException { + final UnsetErasureCodingPolicyRequestProto.Builder builder = + ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto.newBuilder(); + builder.setSrc(src); + UnsetErasureCodingPolicyRequestProto req = builder.build(); + try { + rpcProxy.unsetErasureCodingPolicy(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public void setXAttr(String src, XAttr xAttr, EnumSet flag) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 54bed325577..ff4db035904 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -898,6 
+898,8 @@ service ClientNamenodeProtocol { returns(GetEZForPathResponseProto); rpc setErasureCodingPolicy(SetErasureCodingPolicyRequestProto) returns(SetErasureCodingPolicyResponseProto); + rpc unsetErasureCodingPolicy(UnsetErasureCodingPolicyRequestProto) + returns(UnsetErasureCodingPolicyResponseProto); rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto) returns(GetCurrentEditLogTxidResponseProto); rpc getEditsFromTxid(GetEditsFromTxidRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto index 4bb44fb4077..7ea8d32614f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto @@ -46,6 +46,13 @@ message GetErasureCodingPolicyResponseProto { optional ErasureCodingPolicyProto ecPolicy = 1; } +message UnsetErasureCodingPolicyRequestProto { + required string src = 1; +} + +message UnsetErasureCodingPolicyResponseProto { +} + /** * Block erasure coding reconstruction info */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 397495663ba..1aa15c8b6f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -215,6 +215,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; @@ -1449,6 +1451,18 @@ public SetErasureCodingPolicyResponseProto setErasureCodingPolicy( } } + @Override + public UnsetErasureCodingPolicyResponseProto unsetErasureCodingPolicy( + RpcController controller, UnsetErasureCodingPolicyRequestProto req) + throws ServiceException { + try { + server.unsetErasureCodingPolicy(req.getSrc()); + return UnsetErasureCodingPolicyResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public SetXAttrResponseProto setXAttr(RpcController controller, SetXAttrRequestProto req) throws ServiceException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 1f3b135121e..0ab8c89d79a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -80,7 +80,7 @@ static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn, try { iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK); src = iip.getPath(); - xAttrs = createErasureCodingPolicyXAttr(fsn, iip, ecPolicy); + xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy); } finally { fsd.writeUnlock(); } @@ -88,21 +88,20 @@ static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn, return fsd.getAuditFileInfo(iip); } - static List createErasureCodingPolicyXAttr(final FSNamesystem fsn, + static List setErasureCodingPolicyXAttr(final FSNamesystem fsn, final INodesInPath srcIIP, ErasureCodingPolicy ecPolicy) throws IOException { FSDirectory fsd = fsn.getFSDirectory(); assert fsd.hasWriteLock(); Preconditions.checkNotNull(srcIIP, "INodes cannot be null"); String src = srcIIP.getPath(); - if (srcIIP.getLastINode() != null && - !srcIIP.getLastINode().isDirectory()) { + final INode inode = srcIIP.getLastINode(); + if (inode == null) { + throw new FileNotFoundException("Path not found: " + srcIIP.getPath()); + } + if (!inode.isDirectory()) { throw new IOException("Attempt to set an erasure coding policy " + "for a file " + src); } - if (getErasureCodingPolicyForPath(fsn, srcIIP) != null) { - throw new IOException("Directory " + src + " already has an " + - "erasure coding policy."); - } // System default erasure coding policy will be used since no specified. if (ecPolicy == null) { @@ -124,7 +123,7 @@ static List createErasureCodingPolicyXAttr(final FSNamesystem fsn, ecPolicyNames.add(activePolicy.getName()); } throw new HadoopIllegalArgumentException("Policy [ " + - ecPolicy.getName()+ " ] does not match any of the " + + ecPolicy.getName() + " ] does not match any of the " + "supported policies. Please select any one of " + ecPolicyNames); } } @@ -140,10 +139,76 @@ static List createErasureCodingPolicyXAttr(final FSNamesystem fsn, } finally { IOUtils.closeStream(dOut); } + // check whether the directory already has an erasure coding policy + // directly on itself. + final Boolean hasEcXAttr = + getErasureCodingPolicyXAttrForINode(fsn, inode) == null ? false : true; final List xattrs = Lists.newArrayListWithCapacity(1); xattrs.add(ecXAttr); - FSDirXAttrOp.unprotectedSetXAttrs(fsd, srcIIP, xattrs, - EnumSet.of(XAttrSetFlag.CREATE)); + final EnumSet flag = hasEcXAttr ? + EnumSet.of(XAttrSetFlag.REPLACE) : EnumSet.of(XAttrSetFlag.CREATE); + FSDirXAttrOp.unprotectedSetXAttrs(fsd, srcIIP, xattrs, flag); + return xattrs; + } + + /** + * Unset erasure coding policy from the given directory. + * + * @param fsn The namespace + * @param srcArg The path of the target directory. 
+ * @param logRetryCache whether to record RPC ids in editlog for retry + * cache rebuilding + * @return {@link HdfsFileStatus} + * @throws IOException + */ + static HdfsFileStatus unsetErasureCodingPolicy(final FSNamesystem fsn, + final String srcArg, final boolean logRetryCache) throws IOException { + assert fsn.hasWriteLock(); + + String src = srcArg; + FSPermissionChecker pc = fsn.getPermissionChecker(); + FSDirectory fsd = fsn.getFSDirectory(); + final INodesInPath iip; + List xAttrs; + fsd.writeLock(); + try { + iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK); + src = iip.getPath(); + xAttrs = removeErasureCodingPolicyXAttr(fsn, iip); + } finally { + fsd.writeUnlock(); + } + if (xAttrs != null) { + fsn.getEditLog().logRemoveXAttrs(src, xAttrs, logRetryCache); + } + return fsd.getAuditFileInfo(iip); + } + + private static List removeErasureCodingPolicyXAttr( + final FSNamesystem fsn, final INodesInPath srcIIP) throws IOException { + FSDirectory fsd = fsn.getFSDirectory(); + assert fsd.hasWriteLock(); + Preconditions.checkNotNull(srcIIP, "INodes cannot be null"); + String src = srcIIP.getPath(); + final INode inode = srcIIP.getLastINode(); + if (inode == null) { + throw new FileNotFoundException("Path not found: " + srcIIP.getPath()); + } + if (!inode.isDirectory()) { + throw new IOException("Cannot unset an erasure coding policy " + + "on a file " + src); + } + + // Check whether the directory has a specific erasure coding policy + // directly on itself. + final XAttr ecXAttr = getErasureCodingPolicyXAttrForINode(fsn, inode); + if (ecXAttr == null) { + return null; + } + + final List xattrs = Lists.newArrayListWithCapacity(1); + xattrs.add(ecXAttr); + FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP.getPath(), xattrs); return xattrs; } @@ -271,4 +336,32 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(FSNamesystem fs } return null; } + + private static XAttr getErasureCodingPolicyXAttrForINode( + FSNamesystem fsn, INode inode) throws IOException { + // INode can be null + if (inode == null) { + return null; + } + FSDirectory fsd = fsn.getFSDirectory(); + fsd.readLock(); + try { + // We don't allow setting EC policies on paths with a symlink. Thus + // if a symlink is encountered, the dir shouldn't have EC policy. + // TODO: properly support symlinks + if (inode.isSymlink()) { + return null; + } + final XAttrFeature xaf = inode.getXAttrFeature(); + if (xaf != null) { + XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY); + if (xattr != null) { + return xattr; + } + } + } finally { + fsd.readUnlock(); + } + return null; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 90fb924ee91..249324bed18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6774,6 +6774,42 @@ void setErasureCodingPolicy(final String srcArg, final ErasureCodingPolicy resultingStat); } + /** + * Unset an erasure coding policy from the given path. + * @param srcArg The path of the target directory. + * @throws AccessControlException if the caller is not the superuser. + * @throws UnresolvedLinkException if the path can't be resolved. + * @throws SafeModeException if the Namenode is in safe mode. 
+ */ + void unsetErasureCodingPolicy(final String srcArg, + final boolean logRetryCache) throws IOException, + UnresolvedLinkException, SafeModeException, AccessControlException { + final String operationName = "unsetErasureCodingPolicy"; + checkSuperuserPrivilege(); + checkOperation(OperationCategory.WRITE); + HdfsFileStatus resultingStat = null; + boolean success = false; + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot unset erasure coding policy on " + srcArg); + resultingStat = FSDirErasureCodingOp.unsetErasureCodingPolicy(this, + srcArg, logRetryCache); + success = true; + } catch (AccessControlException ace) { + logAuditEvent(success, operationName, srcArg, null, + resultingStat); + throw ace; + } finally { + writeUnlock(operationName); + if (success) { + getEditLog().logSync(); + } + } + logAuditEvent(success, operationName, srcArg, null, + resultingStat); + } + /** * Get the erasure coding policy information for specified path */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 735b2c06206..6a3f966eec2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -2231,6 +2231,22 @@ public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException return namesystem.getErasureCodingPolicy(src); } + @Override // ClientProtocol + public void unsetErasureCodingPolicy(String src) throws IOException { + checkNNStartup(); + final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; + } + boolean success = false; + try { + namesystem.unsetErasureCodingPolicy(src, cacheEntry != null); + success = true; + } finally { + RetryCache.setState(cacheEntry, success); + } + } + @Override // ReconfigurationProtocol public void startReconfiguration() throws IOException { checkNNStartup(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java index 978fe47423c..fc732e05a22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java @@ -47,6 +47,8 @@ public static void registerCommands(CommandFactory factory) { factory.addClass(SetECPolicyCommand.class, "-" + SetECPolicyCommand.NAME); factory.addClass(GetECPolicyCommand.class, "-" + GetECPolicyCommand.NAME); + factory.addClass(UnsetECPolicyCommand.class, "-" + + UnsetECPolicyCommand.NAME); factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME); } @@ -211,4 +213,36 @@ protected void processOptions(LinkedList args) throws IOException { out.println(sb.toString()); } } + + /** + * Unset the erasure coding policy from a directory. 
+ */ + static class UnsetECPolicyCommand extends ECCommand { + public static final String NAME = "unsetPolicy"; + public static final String USAGE = "<path>"; + public static final String DESCRIPTION = + "Unset erasure coding policy from a directory\n"; + + @Override + protected void processOptions(LinkedList<String> args) throws IOException { + if (args.isEmpty()) { + throw new HadoopIllegalArgumentException("<path> is missing"); + } + if (args.size() > 1) { + throw new HadoopIllegalArgumentException("Too many arguments"); + } + } + + @Override + protected void processPath(PathData item) throws IOException { + super.processPath(item); + DistributedFileSystem dfs = (DistributedFileSystem) item.fs; + try { + dfs.unsetErasureCodingPolicy(item.path); + } catch (IOException e) { + throw new IOException("Unable to unset EC policy from directory " + + item.path + ". " + e.getMessage()); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md index 517469d1c5b..d5dbd0bc39f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md @@ -129,6 +129,7 @@ Deployment hdfs erasurecode [generic options] [-setPolicy [-p <policyName>] <path>] [-getPolicy <path>] + [-unsetPolicy <path>] [-listPolicies] [-usage [cmd ...]] [-help [cmd ...]] @@ -147,6 +148,10 @@ Below are the details about each command. Get details of the ErasureCoding policy of a file or directory at the specified path. + * `[-unsetPolicy <path>]` + + Unset the ErasureCoding policy set by an earlier "setPolicy" call on the directory at the specified path. If the directory inherits its ErasureCoding policy from an ancestor directory, the "unsetPolicy" command has no effect. Unsetting the policy on a directory that has no explicit ErasureCoding policy does not return an error. + * `[-listPolicies]` Lists all supported ErasureCoding policies. These names are suitable for use with the `setPolicy` command.
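For context only (not part of the patch): a minimal client-side sketch of how the API added above is expected to be used. The directory name /ecdir, the example class name, and the policy choice are illustrative assumptions; the calls themselves (setErasureCodingPolicy, unsetErasureCodingPolicy, getErasureCodingPolicy, getAllErasureCodingPolicies) are the DistributedFileSystem methods touched or relied on by this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class UnsetEcPolicyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalStateException("Erasure coding requires HDFS");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        // Hypothetical target directory used only for this illustration.
        Path dir = new Path("/ecdir");
        dfs.mkdirs(dir);

        // Pick any policy the cluster supports and set it on the directory.
        ErasureCodingPolicy policy =
            dfs.getAllErasureCodingPolicies().iterator().next();
        dfs.setErasureCodingPolicy(dir, policy);

        // Files created under /ecdir from this point on are striped with 'policy'.

        // Remove the explicit policy again. Existing files keep their policy;
        // new files fall back to replication or to a policy inherited from an
        // ancestor directory, matching the semantics described in the docs above.
        dfs.unsetErasureCodingPolicy(dir);

        // Returns null once no policy applies to the directory.
        System.out.println("Policy after unset: " + dfs.getErasureCodingPolicy(dir));
      }
    }

The shell equivalent exercised by the new tests below is `hdfs erasurecode -setPolicy /ecdir` followed by `hdfs erasurecode -unsetPolicy /ecdir`.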
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index b7c3ed8ccaa..27fbf18df54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -155,7 +155,7 @@ public void testBasicSetECPolicy() INode newInode = namesystem.getFSDirectory().getINode(newFile.toString()); assertTrue(newInode.asFile().isStriped()); - /* Verify that nested EC policies not supported */ + /* Verify that nested EC policies are supported */ final Path dir1 = new Path("/dir1"); final Path dir2 = new Path(dir1, "dir2"); fs.mkdir(dir1, FsPermission.getDirDefault()); @@ -163,9 +163,8 @@ public void testBasicSetECPolicy() fs.mkdir(dir2, FsPermission.getDirDefault()); try { fs.getClient().setErasureCodingPolicy(dir2.toString(), null); - fail("Nested erasure coding policies"); } catch (IOException e) { - assertExceptionContains("already has an erasure coding policy", e); + fail("Nested erasure coding policies are supported"); } /* Verify that EC policy cannot be set on a file */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java new file mode 100644 index 00000000000..1a4086e711d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java @@ -0,0 +1,366 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; +import org.apache.hadoop.io.erasurecode.CodecUtil; +import org.apache.hadoop.io.erasurecode.ErasureCodeNative; +import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.junit.Assert; + +import java.io.FileNotFoundException; +import java.io.IOException; + +import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; +import static org.junit.Assert.fail; + +/** + * Test unset and change directory's erasure coding policy. 
+ */ +public class TestUnsetAndChangeDirectoryEcPolicy { + + public static final Log LOG = + LogFactory.getLog(TestUnsetAndChangeDirectoryEcPolicy.class); + + private MiniDFSCluster cluster; + private Configuration conf = new Configuration(); + private DistributedFileSystem fs; + private ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager + .getSystemDefaultPolicy(); + private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); + private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); + private final int cellSize = ecPolicy.getCellSize(); + private final int stripsPerBlock = 2; + private final int blockSize = stripsPerBlock * cellSize; + private final int blockGroupSize = dataBlocks * blockSize; + + @Rule + public Timeout globalTimeout = new Timeout(300000); + + @Before + public void setup() throws IOException { + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); + if (ErasureCodeNative.isNativeCodeLoaded()) { + conf.set( + CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY, + NativeRSRawErasureCoderFactory.class.getCanonicalName()); + } + cluster = new MiniDFSCluster.Builder(conf).numDataNodes( + dataBlocks + parityBlocks).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + } + + @After + public void tearDown() { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + /* + * Test unset EC policy on directory. + */ + @Test + public void testUnsetEcPolicy() throws Exception { + final int numBlocks = 1; + final int fileLen = blockGroupSize * numBlocks; + final Path dirPath = new Path("/striped"); + final Path ecFilePath = new Path(dirPath, "ec_file"); + final Path replicateFilePath = new Path(dirPath, "3x_file"); + + fs.mkdirs(dirPath); + // Test unset a directory which has no EC policy + fs.unsetErasureCodingPolicy(dirPath); + // Set EC policy on directory + fs.setErasureCodingPolicy(dirPath, ecPolicy); + + DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L); + fs.unsetErasureCodingPolicy(dirPath); + DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L); + + // ec_file should has EC policy + ErasureCodingPolicy tempEcPolicy = + fs.getErasureCodingPolicy(ecFilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + // rep_file should not have EC policy + tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath); + Assert.assertNull("Replicate file should not have erasure coding policy!", + tempEcPolicy); + + // Directory should not return erasure coding policy + tempEcPolicy = fs.getErasureCodingPolicy(dirPath); + Assert.assertNull("Directory should no have erasure coding policy set!", + tempEcPolicy); + + fs.delete(dirPath, true); + } + + /* + * Test nested directory with different EC policy. 
+ */ + @Test + public void testNestedEcPolicy() throws Exception { + final int numBlocks = 1; + final int fileLen = blockGroupSize * numBlocks; + final Path parentDir = new Path("/ec-6-3"); + final Path childDir = new Path("/ec-6-3/ec-3-2"); + final Path ec63FilePath = new Path(childDir, "ec_6_3_file"); + final Path ec32FilePath = new Path(childDir, "ec_3_2_file"); + final Path ec63FilePath2 = new Path(childDir, "ec_6_3_file_2"); + final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager + .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID); + + fs.mkdirs(parentDir); + fs.setErasureCodingPolicy(parentDir, ecPolicy); + fs.mkdirs(childDir); + // Create RS(6,3) EC policy file + DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L); + // Set RS(3,2) EC policy on child directory + fs.setErasureCodingPolicy(childDir, ec32Policy); + // Create RS(3,2) EC policy file + DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L); + + // Start to check + // ec_6_3_file should has RS-6-3 EC policy + ErasureCodingPolicy tempEcPolicy = + fs.getErasureCodingPolicy(ec63FilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + // ec_3_2_file should have RS-3-2 policy + tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ec32Policy.getName())); + + // Child directory should have RS-3-2 policy + tempEcPolicy = fs.getErasureCodingPolicy(childDir); + Assert.assertTrue( + "Directory should have erasure coding policy set!", + tempEcPolicy.getName().equals(ec32Policy.getName())); + + // Unset EC policy on child directory + fs.unsetErasureCodingPolicy(childDir); + DFSTestUtil.createFile(fs, ec63FilePath2, fileLen, (short) 1, 0L); + + // ec_6_3_file_2 should have RS-6-3 policy + tempEcPolicy = fs.getErasureCodingPolicy(ec63FilePath2); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + // Child directory should have RS-6-3 policy now + tempEcPolicy = fs.getErasureCodingPolicy(childDir); + Assert.assertTrue( + "Directory should have erasure coding policy set!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + fs.delete(parentDir, true); + } + + + /* + * Test unset EC policy on root directory. 
+ */ + @Test + public void testUnsetRootDirEcPolicy() throws Exception { + final int numBlocks = 1; + final int fileLen = blockGroupSize * numBlocks; + final Path rootPath = new Path("/"); + final Path ecFilePath = new Path(rootPath, "ec_file"); + final Path replicateFilePath = new Path(rootPath, "rep_file"); + + // Test unset root path which has no EC policy + fs.unsetErasureCodingPolicy(rootPath); + // Set EC policy on root path + fs.setErasureCodingPolicy(rootPath, ecPolicy); + DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L); + fs.unsetErasureCodingPolicy(rootPath); + DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L); + + // ec_file should has EC policy set + ErasureCodingPolicy tempEcPolicy = + fs.getErasureCodingPolicy(ecFilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + // rep_file should not have EC policy set + tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath); + Assert.assertNull("Replicate file should not have erasure coding policy!", + tempEcPolicy); + + // Directory should not return erasure coding policy + tempEcPolicy = fs.getErasureCodingPolicy(rootPath); + Assert.assertNull("Directory should not have erasure coding policy set!", + tempEcPolicy); + + fs.delete(rootPath, true); + } + + /* + * Test change EC policy on root directory. + */ + @Test + public void testChangeRootDirEcPolicy() throws Exception { + final int numBlocks = 1; + final int fileLen = blockGroupSize * numBlocks; + final Path rootPath = new Path("/"); + final Path ec63FilePath = new Path(rootPath, "ec_6_3_file"); + final Path ec32FilePath = new Path(rootPath, "ec_3_2_file"); + final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager + .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID); + + fs.unsetErasureCodingPolicy(rootPath); + fs.setErasureCodingPolicy(rootPath, ecPolicy); + // Create RS(6,3) EC policy file + DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L); + // Change EC policy from RS(6,3) to RS(3,2) + fs.setErasureCodingPolicy(rootPath, ec32Policy); + DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L); + + // start to check + // ec_6_3_file should has RS-6-3 ec policy set + ErasureCodingPolicy tempEcPolicy = + fs.getErasureCodingPolicy(ec63FilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + // ec_3_2_file should have RS-3-2 policy + tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ec32Policy.getName())); + + // Root directory should have RS-3-2 policy + tempEcPolicy = fs.getErasureCodingPolicy(rootPath); + Assert.assertTrue( + "Directory should have erasure coding policy!", + tempEcPolicy.getName().equals(ec32Policy.getName())); + + fs.delete(rootPath, true); + } + + /* + * Test different replica factor files. 
+ */ + @Test + public void testDifferentReplicaFactor() throws Exception { + final int numBlocks = 1; + final int fileLen = blockGroupSize * numBlocks; + final Path ecDirPath = new Path("/striped"); + final Path ecFilePath = new Path(ecDirPath, "ec_file"); + final Path replicateFilePath = new Path(ecDirPath, "rep_file"); + final Path replicateFilePath2 = new Path(ecDirPath, "rep_file2"); + + fs.mkdirs(ecDirPath); + fs.setErasureCodingPolicy(ecDirPath, ecPolicy); + DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L); + fs.unsetErasureCodingPolicy(ecDirPath); + DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 3, 0L); + DFSTestUtil.createFile(fs, replicateFilePath2, fileLen, (short) 2, 0L); + + // ec_file should has EC policy set + ErasureCodingPolicy tempEcPolicy = + fs.getErasureCodingPolicy(ecFilePath); + Assert.assertTrue("Erasure coding policy mismatch!", + tempEcPolicy.getName().equals(ecPolicy.getName())); + + // rep_file should not have EC policy set + tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath); + Assert.assertNull("Replicate file should not have erasure coding policy!", + tempEcPolicy); + tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath2); + Assert.assertNull("Replicate file should not have erasure coding policy!", + tempEcPolicy); + + // Directory should not return erasure coding policy + tempEcPolicy = fs.getErasureCodingPolicy(ecDirPath); + Assert.assertNull("Directory should not have erasure coding policy set!", + tempEcPolicy); + + fs.delete(ecDirPath, true); + } + + + /* + * Test set and unset EC policy on directory doesn't exist. + */ + @Test + public void testNonExistentDir() throws Exception { + final Path dirPath = new Path("/striped"); + + // Unset EC policy on non-existent directory + try { + fs.unsetErasureCodingPolicy(dirPath); + fail("FileNotFoundException should be thrown for a non-existent" + + " file path"); + } catch (FileNotFoundException e) { + assertExceptionContains("Path not found: " + dirPath, e); + } + + // Set EC policy on non-existent directory + try { + fs.setErasureCodingPolicy(dirPath, ecPolicy); + fail("FileNotFoundException should be thrown for a non-existent" + + " file path"); + } catch (FileNotFoundException e) { + assertExceptionContains("Path not found: " + dirPath, e); + } + } + + /* + * Test set and unset EC policy on file. 
+ */ + @Test + public void testEcPolicyOnFile() throws Exception { + final Path ecFilePath = new Path("/striped_file"); + final int fileLen = blockGroupSize * 2; + DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L); + + // Set EC policy on file + try { + fs.setErasureCodingPolicy(ecFilePath, ecPolicy); + fail("IOException should be thrown for setting EC policy on file"); + } catch (IOException e) { + assertExceptionContains("Attempt to set an erasure coding policy " + + "for a file " + ecFilePath, e); + } + + // Unset EC policy on file + try { + fs.unsetErasureCodingPolicy(ecFilePath); + fail("IOException should be thrown for unsetting EC policy on file"); + } catch (IOException e) { + assertExceptionContains("Cannot unset an erasure coding policy on a file " + + ecFilePath, e); + } + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml index f8ee9738aa3..dd26b4889c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml @@ -151,7 +151,7 @@ SubstringComparator - Directory /ecdir already has an erasure coding policy + EC policy set successfully at NAMENODE/ecdir @@ -174,6 +174,68 @@ + + unsetPolicy : unset policy and get + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -unsetPolicy /ecdir + -fs NAMENODE -getPolicy /ecdir + + + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + is not erasure coded. + + + + + + setPolicy : change different policy and get + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -setPolicy -p RS-DEFAULT-3-2-64k + /ecdir + -fs NAMENODE -getPolicy /ecdir + + + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + ErasureCodingPolicy=[Name=RS-DEFAULT-3-2-64k + + + + + + unsetPolicy : unset inherited EC policy, has no effect + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -mkdir /ecdir/child + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -unsetPolicy /ecdir/child + -fs NAMENODE -touchz /ecdir/child/ecfile + -fs NAMENODE -getPolicy /ecdir/child/ecfile + + + -fs NAMENODE -rm /ecdir/child/ecfile + -fs NAMENODE -rmdir /ecdir/child + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + ErasureCodingPolicy=[Name=RS-DEFAULT-6-3-64k + + + + getPolicy : get EC policy information at specified path, which doesn't have an EC policy