HDFS-14462. Ensure WebHDFS client throws the correct exception during writes. Contributed by Simbarashe Dzinamarira.

(cherry picked from e7a0b8aa83)
Authored by Erik Krogen on 2019-08-02 11:48:31 -07:00; committed by Erik Krogen
parent e1659fe009
commit 2634aaaf9a
2 changed files with 82 additions and 0 deletions
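
For context, the client-visible effect of this change: a WebHDFS write that violates a directory space quota now surfaces the server's typed exception instead of a generic connection error. The sketch below is not part of the commit; it mirrors the new test further down, the webhdfs:// endpoint and paths are hypothetical placeholders, and it assumes a 50 MB space quota is already set on /test/largeFile.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

public class WebHdfsQuotaWriteSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical NameNode HTTP endpoint; replace with a real cluster.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://localhost:9870"), new Configuration());

    Path file = new Path("/test/largeFile/file");
    byte[] chunk = new byte[1 << 20]; // 1 MiB per write

    try (FSDataOutputStream out = fs.create(file)) {
      for (int i = 0; i < 80; i++) { // 80 MiB total, past the 50 MB quota
        out.write(chunk, 0, chunk.length);
      }
    } catch (DSQuotaExceededException e) {
      // With this fix, the typed quota exception reaches the caller;
      // previously a generic IOException from the broken stream did.
      System.err.println("Quota exceeded: " + e.getMessage());
    }
  }
}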

org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -929,6 +929,10 @@ public class WebHdfsFileSystem extends FileSystem
        return toUrl(op, fspath, parameters);
      }
    }

    Path getFspath() {
      return fspath;
    }
  }

  /**
@@ -1017,6 +1021,32 @@ public class WebHdfsFileSystem extends FileSystem
        throws IOException {
      return new FSDataOutputStream(new BufferedOutputStream(
          conn.getOutputStream(), bufferSize), statistics) {
        @Override
        public void write(int b) throws IOException {
          try {
            super.write(b);
          } catch (IOException e) {
            LOG.warn("Write to output stream for file '{}' failed. "
                + "Attempting to fetch the cause from the connection.",
                getFspath(), e);
            validateResponse(op, conn, true);
            throw e;
          }
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
          try {
            super.write(b, off, len);
          } catch (IOException e) {
            LOG.warn("Write to output stream for file '{}' failed. "
                + "Attempting to fetch the cause from the connection.",
                getFspath(), e);
            validateResponse(op, conn, true);
            throw e;
          }
        }

        @Override
        public void close() throws IOException {
          try {

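The key call above is validateResponse(op, conn, true): when a local stream write fails, the client re-checks the HTTP connection, and if the server recorded an error (such as a quota violation), the RemoteException JSON in the response body is unwrapped and thrown in place of the opaque socket error. Below is a rough sketch of that pattern, with hypothetical names: throwIfErrorResponse is not the real method, and the real client maps the named exception class back to a typed IOException generically via RemoteException unwrapping rather than special-casing one type.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

final class WebHdfsErrorUnwrapSketch {
  static void throwIfErrorResponse(HttpURLConnection conn) throws IOException {
    int status = conn.getResponseCode();
    if (status >= 200 && status < 300) {
      return; // success: nothing to unwrap
    }
    InputStream err = conn.getErrorStream();
    if (err == null) {
      throw new IOException("HTTP " + status + " with no error body");
    }
    // WebHDFS reports failures as a JSON RemoteException document.
    JsonNode re = new ObjectMapper().readTree(err).path("RemoteException");
    String exception = re.path("exception").asText();
    String message = re.path("message").asText();
    if ("DSQuotaExceededException".equals(exception)) {
      throw new DSQuotaExceededException(message); // one type, for brevity
    }
    throw new IOException(exception + ": " + message);
  }
}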
org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -84,6 +84,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.hdfs.TestFileCreation;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
@@ -391,6 +392,57 @@ public class TestWebHDFS {
    }
  }

  /**
   * Test client receives correct DSQuotaExceededException.
   */
  @Test
  public void testExceedingFileSpaceQuota() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();
    long spaceQuota = 50L << 20;
    long fileLength = 80L << 20;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .build();
    try {
      cluster.waitActive();
      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
          WebHdfsConstants.WEBHDFS_SCHEME);
      final Path dir = new Path("/test/largeFile");
      assertTrue(fs.mkdirs(dir));

      final byte[] data = new byte[1 << 20];
      RANDOM.nextBytes(data);

      cluster.getFileSystem().setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
          spaceQuota);

      final Path p = new Path(dir, "file");
      FSDataOutputStream out = fs.create(p);
      try {
        for (long remaining = fileLength; remaining > 0;) {
          final int n = (int) Math.min(remaining, data.length);
          out.write(data, 0, n);
          remaining -= n;
        }
        fail("should have thrown exception during the write");
      } catch (DSQuotaExceededException e) {
        // expected
      } finally {
        try {
          out.close();
        } catch (Exception e) {
          // discard exception from close
        }
      }
    } finally {
      cluster.shutdown();
    }
  }
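Two details of the test are worth noting. Writing 80 MB against a 50 MB space quota forces the failure to occur partway through the streamed upload rather than at create time, which is exactly the path the WebHdfsFileSystem change above intercepts. And the close() in the finally block deliberately discards its exception: once the quota violation breaks the stream, close() is expected to fail too, and the assertion is only that the typed DSQuotaExceededException, not a generic IOException, reaches the catch block.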
  @Test(timeout=300000)
  public void testCustomizedUserAndGroupNames() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();