HADOOP-16150. checksumFS doesn't wrap concat(): concatenated files don't have checksums.

Contributed by Steve Loughran.

Change-Id: I85fc1fc9445ca0b7d325495d3bc55fe9f5e5ce52
This commit is contained in:
Steve Loughran 2019-02-27 22:56:38 +00:00
parent feccd282fe
commit 8b517e7ad6
2 changed files with 14 additions and 0 deletions

View File

@ -372,6 +372,12 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
+ "by ChecksumFileSystem");
}
@Override
public void concat(final Path f, final Path[] psrcs) throws IOException {
throw new UnsupportedOperationException("Concat is not supported "
+ "by ChecksumFileSystem");
}
/**
* Calculated the length of the checksum file in bytes.
* @param size the length of the data file in bytes

View File

@ -17,6 +17,8 @@
*/
package org.apache.hadoop.fs.contract.localfs;
import org.junit.Assume;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
@ -27,6 +29,12 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestLocalFSContractMultipartUploader
extends AbstractContractMultipartUploaderTest {
@Override
public void setup() throws Exception {
Assume.assumeTrue("Skipping until HDFS-13934", false);
super.setup();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new LocalFSContract(conf);