diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 77b053dad34..ddfe98f2f4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -133,6 +133,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.NoECPolicySetException; import org.apache.hadoop.hdfs.protocol.OpenFileEntry; import org.apache.hadoop.hdfs.protocol.OpenFilesIterator; import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType; @@ -2757,7 +2758,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, throw re.unwrapRemoteException(AccessControlException.class, SafeModeException.class, UnresolvedPathException.class, - FileNotFoundException.class); + FileNotFoundException.class, NoECPolicySetException.class); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java new file mode 100644 index 00000000000..de3054a4d63 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Thrown when no EC policy is set explicitly on the directory. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class NoECPolicySetException extends IOException { + private static final long serialVersionUID = 1L; + + public NoECPolicySetException(String msg) { + super(msg); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 920451db6bc..5ebfa2f9010 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; +import org.apache.hadoop.hdfs.protocol.NoECPolicySetException; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import 
org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.WritableUtils; @@ -206,6 +207,9 @@ final class FSDirErasureCodingOp { } if (xAttrs != null) { fsn.getEditLog().logRemoveXAttrs(src, xAttrs, logRetryCache); + } else { + throw new NoECPolicySetException( + "No erasure coding policy explicitly set on " + src); } return fsd.getAuditFileInfo(iip); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java index 56706b279fc..5f8626e0702 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; +import org.apache.hadoop.hdfs.protocol.NoECPolicySetException; import org.apache.hadoop.hdfs.util.ECPolicyLoader; import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; import org.apache.hadoop.tools.TableListing; @@ -424,6 +425,12 @@ public class ECAdmin extends Configured implements Tool { "non-empty directory will not automatically convert existing" + " files to replicated data."); } + } catch (NoECPolicySetException e) { + System.err.println(AdminHelper.prettifyException(e)); + System.err.println("Use '-setPolicy -path <PATH> -replicate' to enforce" + + " default replication policy irrespective of EC policy" + + " defined on parent."); + return 2; } catch (Exception e) { System.err.println(AdminHelper.prettifyException(e)); return 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java index 52cf163e26d..362157dc370 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.NoECPolicySetException; import org.apache.hadoop.io.erasurecode.CodecUtil; import org.apache.hadoop.io.erasurecode.ErasureCodeNative; import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; @@ -98,7 +99,11 @@ public class TestUnsetAndChangeDirectoryEcPolicy { fs.mkdirs(dirPath); // Test unset a directory which has no EC policy - fs.unsetErasureCodingPolicy(dirPath); + try { + fs.unsetErasureCodingPolicy(dirPath); + fail(); + } catch (NoECPolicySetException e) { + } // Set EC policy on directory fs.setErasureCodingPolicy(dirPath, ecPolicy.getName()); @@ -126,8 +131,8 @@ public class TestUnsetAndChangeDirectoryEcPolicy { } /* - * Test nested directory with different EC policy. - */ + * Test nested directory with different EC policy. 
+ */ @Test public void testNestedEcPolicy() throws Exception { final int numBlocks = 1; @@ -199,7 +204,11 @@ public class TestUnsetAndChangeDirectoryEcPolicy { final Path replicateFilePath = new Path(rootPath, "rep_file"); // Test unset root path which has no EC policy - fs.unsetErasureCodingPolicy(rootPath); + try { + fs.unsetErasureCodingPolicy(rootPath); + fail(); + } catch (NoECPolicySetException e) { + } // Set EC policy on root path fs.setErasureCodingPolicy(rootPath, ecPolicy.getName()); DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L); @@ -238,7 +247,11 @@ public class TestUnsetAndChangeDirectoryEcPolicy { final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID); - fs.unsetErasureCodingPolicy(rootPath); + try { + fs.unsetErasureCodingPolicy(rootPath); + fail(); + } catch (NoECPolicySetException e) { + } fs.setErasureCodingPolicy(rootPath, ecPolicy.getName()); // Create RS(6,3) EC policy file DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml index 1a7f2c7d347..2cc08f4aa5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml @@ -359,6 +359,30 @@ + + unsetPolicy : unset on non EC directory + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -mkdir /ecdir/child + -fs NAMENODE -unsetPolicy -path /ecdir/child + + + -fs NAMENODE -rm /ecdir/child/ecfile + -fs NAMENODE -rmdir /ecdir/child + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + NoECPolicySetException: No erasure coding policy explicitly set on /ecdir/child + + + SubstringComparator + Use '-setPolicy -path <PATH> -replicate' to enforce default replication policy irrespective of EC policy defined on parent. 
+ + + + unsetPolicy : unset policy on non-empty directory