From 2634aaaf9a26e01c508623364da7a2a332397117 Mon Sep 17 00:00:00 2001
From: Erik Krogen
Date: Fri, 2 Aug 2019 11:48:31 -0700
Subject: [PATCH] HDFS-14462 Ensure WebHDFS client throws the correct exception
 during writes. Contributed by Simbarashe Dzinamarira.

(cherry picked from e7a0b8aa83c1fb933d409c514d2155e986e4e25b)
---
 .../hadoop/hdfs/web/WebHdfsFileSystem.java         | 30 +++++++++++
 .../apache/hadoop/hdfs/web/TestWebHDFS.java        | 52 +++++++++++++++++++
 2 files changed, 82 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 93f441d0233..f716593cf0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -929,6 +929,10 @@ public class WebHdfsFileSystem extends FileSystem
       return toUrl(op, fspath, parameters);
     }
   }
+
+  Path getFspath() {
+    return fspath;
+  }
 }
 
 /**
@@ -1017,6 +1021,32 @@ public class WebHdfsFileSystem extends FileSystem
       throws IOException {
     return new FSDataOutputStream(new BufferedOutputStream(
         conn.getOutputStream(), bufferSize), statistics) {
+      @Override
+      public void write(int b) throws IOException {
+        try {
+          super.write(b);
+        } catch (IOException e) {
+          LOG.warn("Write to output stream for file '{}' failed. "
+              + "Attempting to fetch the cause from the connection.",
+              getFspath(), e);
+          validateResponse(op, conn, true);
+          throw e;
+        }
+      }
+
+      @Override
+      public void write(byte[] b, int off, int len) throws IOException {
+        try {
+          super.write(b, off, len);
+        } catch (IOException e) {
+          LOG.warn("Write to output stream for file '{}' failed. "
+              + "Attempting to fetch the cause from the connection.",
+              getFspath(), e);
+          validateResponse(op, conn, true);
+          throw e;
+        }
+      }
+
       @Override
       public void close() throws IOException {
         try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 40176573ec9..21a6d9c5e60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
@@ -391,6 +392,57 @@ public class TestWebHDFS {
     }
   }
 
+  /**
+   * Test client receives correct DSQuotaExceededException.
+   */
+  @Test
+  public void testExceedingFileSpaceQuota() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    long spaceQuota = 50L << 20;
+    long fileLength = 80L << 20;
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .build();
+
+    try {
+      cluster.waitActive();
+
+      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsConstants.WEBHDFS_SCHEME);
+      final Path dir = new Path("/test/largeFile");
+      assertTrue(fs.mkdirs(dir));
+
+      final byte[] data = new byte[1 << 20];
+      RANDOM.nextBytes(data);
+
+      cluster.getFileSystem().setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
+          spaceQuota);
+
+      final Path p = new Path(dir, "file");
+
+      FSDataOutputStream out = fs.create(p);
+      try {
+        for (long remaining = fileLength; remaining > 0;) {
+          final int n = (int) Math.min(remaining, data.length);
+          out.write(data, 0, n);
+          remaining -= n;
+        }
+        fail("should have thrown exception during the write");
+      } catch (DSQuotaExceededException e) {
+        //expected
+      } finally {
+        try {
+          out.close();
+        } catch (Exception e) {
+          // discard exception from close
+        }
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   @Test(timeout=300000)
   public void testCustomizedUserAndGroupNames() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();