HADOOP-9361: Strictly define FileSystem APIs
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1607621 13f79535-47bb-0310-9956-ffa450edef68
Parent: 9133e3142c
Commit: 6ba46c2dfc
@@ -324,6 +324,8 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10710. hadoop.auth cookie is not properly constructed according to
     RFC2109. (Juan Yu via tucu)
 
+    HADOOP-10312 Shell.ExitCodeException to have more useful toString (stevel)
+
 Release 2.4.1 - 2014-06-23
 
   INCOMPATIBLE CHANGES
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.BufferedInputStream;
+import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.IOException;
 
@@ -51,6 +52,9 @@ implements Seekable, PositionedReadable, HasFileDescriptor {
 
   @Override
   public long getPos() throws IOException {
+    if (in == null) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
     return ((FSInputStream)in).getPos()-(count-pos);
   }
 
@@ -66,8 +70,11 @@ implements Seekable, PositionedReadable, HasFileDescriptor {
 
   @Override
   public void seek(long pos) throws IOException {
-    if( pos<0 ) {
-      return;
+    if (in == null) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+    if (pos < 0) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     if (this.pos != this.count) {
       // optimize: check if the pos is in the buffer
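The hunks above tighten the seek/getPos contract of BufferedFSInputStream: calls on a closed stream now fail with an IOException carrying FSExceptionMessages.STREAM_IS_CLOSED, and a negative seek raises EOFException instead of silently returning. A minimal JUnit-style sketch of that contract; the `openTestStream()` fixture is hypothetical and not part of this patch:

```java
import static org.junit.Assert.fail;

import java.io.EOFException;
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.junit.Test;

public class TestSeekContractSketch {

  @Test
  public void testNegativeSeekRejected() throws IOException {
    FSDataInputStream in = openTestStream();   // hypothetical fixture
    try {
      in.seek(-1);
      fail("Expected an EOFException for a negative seek");
    } catch (EOFException expected) {
      // new contract: negative seek -> EOFException("Cannot seek to a negative offset")
    } finally {
      in.close();
    }
  }

  @Test
  public void testClosedStreamRejected() throws IOException {
    FSDataInputStream in = openTestStream();   // hypothetical fixture
    in.close();
    try {
      in.getPos();
      fail("Expected an IOException on a closed stream");
    } catch (IOException expected) {
      // new contract: closed stream -> IOException("Stream is closed!")
    }
  }

  private FSDataInputStream openTestStream() throws IOException {
    throw new UnsupportedOperationException("test fixture not shown");
  }
}
```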
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs;
 
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -318,8 +319,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
 
     @Override
     public synchronized void seek(long pos) throws IOException {
-      if(pos>getFileLength()) {
-        throw new IOException("Cannot seek after EOF");
+      if (pos > getFileLength()) {
+        throw new EOFException("Cannot seek after EOF");
       }
       super.seek(pos);
     }
@@ -67,7 +67,10 @@ public class FSDataOutputStream extends DataOutputStream
     }
 
     public void close() throws IOException {
-      out.close();
+      // ensure close works even if a null reference was passed in
+      if (out != null) {
+        out.close();
+      }
     }
   }
 
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+/**
+ * Standard strings to use in exception messages in filesystems
+ * HDFS is used as the reference source of the strings
+ */
+public class FSExceptionMessages {
+
+  /**
+   * The operation failed because the stream is closed: {@value}
+   */
+  public static final String STREAM_IS_CLOSED = "Stream is closed!";
+
+  /**
+   * Negative offset seek forbidden : {@value}
+   */
+  public static final String NEGATIVE_SEEK =
+      "Cannot seek to a negative offset";
+
+  /**
+   * Seeks : {@value}
+   */
+  public static final String CANNOT_SEEK_PAST_EOF =
+      "Attempted to seek or read past the end of the file";
+}
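The new FSExceptionMessages class centralizes the message strings that the modified streams throw, so every filesystem reports the same text for the same failure. A sketch of how a stream implementation might use these constants; the `ExampleSeekableStream` class, its `checkStream()` helper and its fields are illustrative only, not part of the patch:

```java
import java.io.EOFException;
import java.io.IOException;

import org.apache.hadoop.fs.FSExceptionMessages;

/** Illustrative only: intended use of the shared message constants. */
class ExampleSeekableStream {
  private boolean closed;
  private long pos;

  /** Reject operations on a closed stream with the shared message. */
  private void checkStream() throws IOException {
    if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }
  }

  public void seek(long newPos) throws IOException {
    checkStream();
    if (newPos < 0) {
      // negative seeks are rejected with EOFException, as in this patch
      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
    }
    pos = newPos;
  }
}
```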
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
@@ -394,8 +395,8 @@ abstract public class FSInputChecker extends FSInputStream {
 
   @Override
   public synchronized void seek(long pos) throws IOException {
-    if( pos<0 ) {
-      return;
+    if( pos < 0 ) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
     }
     // optimize: check if the pos is in the buffer
     long start = chunkPos - this.count;
@@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import java.io.BufferedOutputStream;
 import java.io.DataOutput;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -105,6 +106,10 @@ public class RawLocalFileSystem extends FileSystem {
 
    @Override
    public void seek(long pos) throws IOException {
+      if (pos < 0) {
+        throw new EOFException(
+            FSExceptionMessages.NEGATIVE_SEEK);
+      }
      fis.getChannel().position(pos);
      this.position = pos;
    }
@@ -256,7 +261,7 @@ public class RawLocalFileSystem extends FileSystem {
      boolean createParent, int bufferSize, short replication, long blockSize,
      Progressable progress) throws IOException {
    if (exists(f) && !overwrite) {
-      throw new IOException("File already exists: "+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
    }
    Path parent = f.getParent();
    if (parent != null && !mkdirs(parent)) {
@@ -272,7 +277,7 @@ public class RawLocalFileSystem extends FileSystem {
      EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
      Progressable progress) throws IOException {
    if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) {
-      throw new IOException("File already exists: "+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
    }
    return new FSDataOutputStream(new BufferedOutputStream(
        new LocalFSFileOutputStream(f, false), bufferSize), statistics);
@@ -344,6 +349,10 @@ public class RawLocalFileSystem extends FileSystem {
  @Override
  public boolean delete(Path p, boolean recursive) throws IOException {
    File f = pathToFile(p);
+    if (!f.exists()) {
+      //no path, return false "nothing to delete"
+      return false;
+    }
    if (f.isFile()) {
      return f.delete();
    } else if (!recursive && f.isDirectory() &&
@@ -406,10 +415,14 @@ public class RawLocalFileSystem extends FileSystem {
    if(parent != null) {
      File parent2f = pathToFile(parent);
      if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
-        throw new FileAlreadyExistsException("Parent path is not a directory: "
+        throw new ParentNotDirectoryException("Parent path is not a directory: "
            + parent);
      }
    }
+    if (p2f.exists() && !p2f.isDirectory()) {
+      throw new FileNotFoundException("Destination exists" +
+          " and is not a directory: " + p2f.getCanonicalPath());
+    }
    return (parent == null || mkdirs(parent)) &&
        (p2f.mkdir() || p2f.isDirectory());
  }
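With these hunks, RawLocalFileSystem surfaces the same exception classes as HDFS: create() over an existing file raises FileAlreadyExistsException, mkdirs() under a non-directory parent raises ParentNotDirectoryException, and delete() of a missing path returns false rather than failing. A hedged sketch of a caller relying on that behaviour, using the raw local filesystem and placeholder paths:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNoOverwriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // target the raw local filesystem directly, bypassing the checksum layer
    FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();
    Path p = new Path("/tmp/example-file");               // placeholder path

    fs.create(p, true).close();                           // create, overwrite allowed
    try {
      // overwrite=false on an existing file now fails with the
      // specific subclass rather than a bare IOException
      fs.create(p, false).close();
    } catch (FileAlreadyExistsException e) {
      System.out.println("as expected: " + e.getMessage());
    }

    // deleting a path that does not exist now returns false instead of throwing
    boolean deleted = fs.delete(new Path("/tmp/does-not-exist"), false);
    System.out.println("delete of missing path returned " + deleted);
  }
}
```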
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ftp;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.ConnectException;
 import java.net.URI;
 
 import org.apache.commons.logging.Log;
@@ -33,11 +34,14 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -56,6 +60,12 @@ public class FTPFileSystem extends FileSystem {
   public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
   public static final int DEFAULT_BLOCK_SIZE = 4 * 1024;
+  public static final String FS_FTP_USER_PREFIX = "fs.ftp.user.";
+  public static final String FS_FTP_HOST = "fs.ftp.host";
+  public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
+  public static final String FS_FTP_PASSWORD_PREFIX = "fs.ftp.password.";
+  public static final String E_SAME_DIRECTORY_ONLY =
+      "only same directory renames are supported";
 
   private URI uri;
 
@@ -75,11 +85,11 @@ public class FTPFileSystem extends FileSystem {
     super.initialize(uri, conf);
     // get host information from uri (overrides info in conf)
     String host = uri.getHost();
-    host = (host == null) ? conf.get("fs.ftp.host", null) : host;
+    host = (host == null) ? conf.get(FS_FTP_HOST, null) : host;
     if (host == null) {
       throw new IOException("Invalid host specified");
     }
-    conf.set("fs.ftp.host", host);
+    conf.set(FS_FTP_HOST, host);
 
     // get port information from uri, (overrides info in conf)
     int port = uri.getPort();
@@ -96,11 +106,11 @@ public class FTPFileSystem extends FileSystem {
       }
     }
     String[] userPasswdInfo = userAndPassword.split(":");
-    conf.set("fs.ftp.user." + host, userPasswdInfo[0]);
+    conf.set(FS_FTP_USER_PREFIX + host, userPasswdInfo[0]);
     if (userPasswdInfo.length > 1) {
-      conf.set("fs.ftp.password." + host, userPasswdInfo[1]);
+      conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
     } else {
-      conf.set("fs.ftp.password." + host, null);
+      conf.set(FS_FTP_PASSWORD_PREFIX + host, null);
     }
     setConf(conf);
     this.uri = uri;
@@ -115,23 +125,24 @@ public class FTPFileSystem extends FileSystem {
   private FTPClient connect() throws IOException {
     FTPClient client = null;
     Configuration conf = getConf();
-    String host = conf.get("fs.ftp.host");
-    int port = conf.getInt("fs.ftp.host.port", FTP.DEFAULT_PORT);
-    String user = conf.get("fs.ftp.user." + host);
-    String password = conf.get("fs.ftp.password." + host);
+    String host = conf.get(FS_FTP_HOST);
+    int port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT);
+    String user = conf.get(FS_FTP_USER_PREFIX + host);
+    String password = conf.get(FS_FTP_PASSWORD_PREFIX + host);
     client = new FTPClient();
     client.connect(host, port);
     int reply = client.getReplyCode();
     if (!FTPReply.isPositiveCompletion(reply)) {
-      throw new IOException("Server - " + host
-          + " refused connection on port - " + port);
+      throw NetUtils.wrapException(host, port,
+          NetUtils.UNKNOWN_HOST, 0,
+          new ConnectException("Server response " + reply));
     } else if (client.login(user, password)) {
       client.setFileTransferMode(FTP.BLOCK_TRANSFER_MODE);
       client.setFileType(FTP.BINARY_FILE_TYPE);
       client.setBufferSize(DEFAULT_BUFFER_SIZE);
     } else {
       throw new IOException("Login failed on server - " + host + ", port - "
-          + port);
+          + port + " as user '" + user + "'");
     }
 
     return client;
@@ -179,7 +190,7 @@ public class FTPFileSystem extends FileSystem {
     FileStatus fileStat = getFileStatus(client, absolute);
     if (fileStat.isDirectory()) {
       disconnect(client);
-      throw new IOException("Path " + file + " is a directory.");
+      throw new FileNotFoundException("Path " + file + " is a directory.");
     }
     client.allocate(bufferSize);
     Path parent = absolute.getParent();
@@ -214,12 +225,18 @@ public class FTPFileSystem extends FileSystem {
     final FTPClient client = connect();
     Path workDir = new Path(client.printWorkingDirectory());
     Path absolute = makeAbsolute(workDir, file);
-    if (exists(client, file)) {
-      if (overwrite) {
-        delete(client, file);
+    FileStatus status;
+    try {
+      status = getFileStatus(client, file);
+    } catch (FileNotFoundException fnfe) {
+      status = null;
+    }
+    if (status != null) {
+      if (overwrite && !status.isDirectory()) {
+        delete(client, file, false);
       } else {
         disconnect(client);
-        throw new IOException("File already exists: " + file);
+        throw new FileAlreadyExistsException("File already exists: " + file);
       }
     }
 
@@ -272,14 +289,13 @@ public class FTPFileSystem extends FileSystem {
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
    * the overhead of opening/closing a TCP connection.
+   * @throws IOException on IO problems other than FileNotFoundException
    */
-  private boolean exists(FTPClient client, Path file) {
+  private boolean exists(FTPClient client, Path file) throws IOException {
     try {
       return getFileStatus(client, file) != null;
     } catch (FileNotFoundException fnfe) {
       return false;
-    } catch (IOException ioe) {
-      throw new FTPException("Failed to get file status", ioe);
     }
   }
 
@@ -294,12 +310,6 @@
     }
   }
 
-  /** @deprecated Use delete(Path, boolean) instead */
-  @Deprecated
-  private boolean delete(FTPClient client, Path file) throws IOException {
-    return delete(client, file, false);
-  }
-
   /**
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
@@ -310,9 +320,14 @@
     Path workDir = new Path(client.printWorkingDirectory());
     Path absolute = makeAbsolute(workDir, file);
     String pathName = absolute.toUri().getPath();
-    FileStatus fileStat = getFileStatus(client, absolute);
-    if (fileStat.isFile()) {
-      return client.deleteFile(pathName);
+    try {
+      FileStatus fileStat = getFileStatus(client, absolute);
+      if (fileStat.isFile()) {
+        return client.deleteFile(pathName);
+      }
+    } catch (FileNotFoundException e) {
+      //the file is not there
+      return false;
     }
     FileStatus[] dirEntries = listStatus(client, absolute);
     if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
@@ -491,7 +506,7 @@
         created = created && client.makeDirectory(pathName);
       }
     } else if (isFile(client, absolute)) {
-      throw new IOException(String.format(
+      throw new ParentNotDirectoryException(String.format(
           "Can't make directory for path %s since it is a file.", absolute));
     }
     return created;
@@ -527,6 +542,23 @@
     }
   }
 
+  /**
+   * Probe for a path being a parent of another
+   * @param parent parent path
+   * @param child possible child path
+   * @return true if the parent's path matches the start of the child's
+   */
+  private boolean isParentOf(Path parent, Path child) {
+    URI parentURI = parent.toUri();
+    String parentPath = parentURI.getPath();
+    if (!parentPath.endsWith("/")) {
+      parentPath += "/";
+    }
+    URI childURI = child.toUri();
+    String childPath = childURI.getPath();
+    return childPath.startsWith(parentPath);
+  }
+
   /**
    * Convenience method, so that we don't open a new connection when using this
    * method from within another method. Otherwise every API invocation incurs
@@ -544,20 +576,31 @@
     Path absoluteSrc = makeAbsolute(workDir, src);
     Path absoluteDst = makeAbsolute(workDir, dst);
     if (!exists(client, absoluteSrc)) {
-      throw new IOException("Source path " + src + " does not exist");
+      throw new FileNotFoundException("Source path " + src + " does not exist");
+    }
+    if (isDirectory(absoluteDst)) {
+      // destination is a directory: rename goes underneath it with the
+      // source name
+      absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
     }
     if (exists(client, absoluteDst)) {
-      throw new IOException("Destination path " + dst
-          + " already exist, cannot rename!");
+      throw new FileAlreadyExistsException("Destination path " + dst
+          + " already exists");
     }
     String parentSrc = absoluteSrc.getParent().toUri().toString();
     String parentDst = absoluteDst.getParent().toUri().toString();
-    String from = src.getName();
-    String to = dst.getName();
-    if (!parentSrc.equals(parentDst)) {
-      throw new IOException("Cannot rename parent(source): " + parentSrc
-          + ", parent(destination): " + parentDst);
+    if (isParentOf(absoluteSrc, absoluteDst)) {
+      throw new IOException("Cannot rename " + absoluteSrc + " under itself"
+          + " : "+ absoluteDst);
     }
+
+    if (!parentSrc.equals(parentDst)) {
+      throw new IOException("Cannot rename source: " + absoluteSrc
+          + " to " + absoluteDst
+          + " -"+ E_SAME_DIRECTORY_ONLY);
+    }
+    String from = absoluteSrc.getName();
+    String to = absoluteDst.getName();
     client.changeWorkingDirectory(parentSrc);
     boolean renamed = client.rename(from, to);
     return renamed;
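The FTPFileSystem hunks above replace the inline "fs.ftp.*" strings with named constants, wrap connection failures via NetUtils, and restrict rename() to same-directory renames (E_SAME_DIRECTORY_ONLY). A sketch of per-host FTP credentials configured through the new keys; the host name and credentials are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ftp.FTPFileSystem;

public class FtpConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String host = "ftp.example.org";                                  // placeholder
    conf.set(FTPFileSystem.FS_FTP_HOST, host);
    conf.set(FTPFileSystem.FS_FTP_HOST_PORT, "21");
    conf.set(FTPFileSystem.FS_FTP_USER_PREFIX + host, "user");        // placeholder
    conf.set(FTPFileSystem.FS_FTP_PASSWORD_PREFIX + host, "secret");  // placeholder
    // FTPFileSystem.initialize()/connect() read these same keys back through
    // the constants instead of repeating the string literals.
  }
}
```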
@@ -103,7 +103,7 @@ public class FTPInputStream extends FSInputStream {
   @Override
   public synchronized void close() throws IOException {
     if (closed) {
-      throw new IOException("Stream closed");
+      return;
     }
     super.close();
     closed = true;
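FTPInputStream.close() now returns silently when the stream is already closed, matching the java.io.Closeable contract ("If the stream is already closed then invoking this method has no effect"). A small sketch of the double-close pattern this makes safe, with an explicit close followed by the implicit close from try-with-resources:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DoubleCloseExample {
  // Close explicitly in the body and again implicitly at the end of the
  // try-with-resources block; with this patch the second close is a no-op
  // rather than an IOException("Stream closed").
  static void readAndClose(FileSystem fs, Path path) throws IOException {
    try (FSDataInputStream in = fs.open(path)) {
      in.read();
      in.close();   // explicit close
    }               // implicit second close: now harmless
  }
}
```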
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -226,7 +227,7 @@ public class S3FileSystem extends FileSystem {
       if (overwrite) {
         delete(file, true);
       } else {
-        throw new IOException("File already exists: " + file);
+        throw new FileAlreadyExistsException("File already exists: " + file);
       }
     } else {
       Path parent = file.getParent();
@@ -22,6 +22,7 @@ import static org.apache.hadoop.fs.s3native.NativeS3FileSystem.PATH_DELIMITER;
 
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -32,17 +33,19 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.s3.S3Credentials;
 import org.apache.hadoop.fs.s3.S3Exception;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.jets3t.service.S3Service;
 import org.jets3t.service.S3ServiceException;
 import org.jets3t.service.ServiceException;
 import org.jets3t.service.StorageObjectsChunk;
+import org.jets3t.service.impl.rest.HttpException;
 import org.jets3t.service.impl.rest.httpclient.RestS3Service;
 import org.jets3t.service.model.MultipartPart;
 import org.jets3t.service.model.MultipartUpload;
@@ -51,6 +54,8 @@ import org.jets3t.service.model.S3Object;
 import org.jets3t.service.model.StorageObject;
 import org.jets3t.service.security.AWSCredentials;
 import org.jets3t.service.utils.MultipartUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -66,8 +71,8 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
 
   private String serverSideEncryptionAlgorithm;
 
-  public static final Log LOG =
-      LogFactory.getLog(Jets3tNativeFileSystemStore.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Jets3tNativeFileSystemStore.class);
 
   @Override
   public void initialize(URI uri, Configuration conf) throws IOException {
@@ -79,7 +84,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
           s3Credentials.getSecretAccessKey());
       this.s3Service = new RestS3Service(awsCredentials);
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     multipartEnabled =
         conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
@@ -115,16 +120,10 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
         object.setMd5Hash(md5Hash);
       }
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     } finally {
-      if (in != null) {
-        try {
-          in.close();
-        } catch (IOException e) {
-          // ignore
-        }
-      }
+      IOUtils.closeStream(in);
     }
   }
 
@@ -147,10 +146,8 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     try {
       mpUtils.uploadObjects(bucket.getName(), s3Service,
           objectsToUploadAsMultipart, null);
-    } catch (ServiceException e) {
-      handleServiceException(e);
     } catch (Exception e) {
-      throw new S3Exception(e);
+      handleException(e, key);
     }
   }
 
@@ -163,8 +160,8 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       object.setContentLength(0);
       object.setServerSideEncryptionAlgorithm(serverSideEncryptionAlgorithm);
       s3Service.putObject(bucket, object);
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+    } catch (ServiceException e) {
+      handleException(e, key);
     }
   }
 
@@ -172,20 +169,21 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   public FileMetadata retrieveMetadata(String key) throws IOException {
     StorageObject object = null;
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting metadata for key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting metadata for key: {} from bucket: {}",
+          key, bucket.getName());
       object = s3Service.getObjectDetails(bucket.getName(), key);
       return new FileMetadata(key, object.getContentLength(),
          object.getLastModifiedDate().getTime());
 
     } catch (ServiceException e) {
-      // Following is brittle. Is there a better way?
-      if ("NoSuchKey".equals(e.getErrorCode())) {
-        return null; //return null if key not found
+      try {
+        // process
+        handleException(e, key);
+        return null;
+      } catch (FileNotFoundException fnfe) {
+        // and downgrade missing files
+        return null;
       }
-      handleServiceException(e);
-      return null; //never returned - keep compiler happy
     } finally {
       if (object != null) {
         object.closeDataInputStream();
@@ -204,13 +202,12 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   @Override
   public InputStream retrieve(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName());
-      }
+      LOG.debug("Getting key: {} from bucket: {}",
+          key, bucket.getName());
       S3Object object = s3Service.getObject(bucket.getName(), key);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
       return null; //return null if key not found
     }
   }
@@ -228,15 +225,14 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   public InputStream retrieve(String key, long byteRangeStart)
     throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Getting key: " + key + " from bucket:" + bucket.getName() + " with byteRangeStart: " + byteRangeStart);
-      }
+      LOG.debug("Getting key: {} from bucket: {} with byteRangeStart: {}",
+          key, bucket.getName(), byteRangeStart);
       S3Object object = s3Service.getObject(bucket, key, null, null, null,
           null, byteRangeStart, null);
       return object.getDataInputStream();
     } catch (ServiceException e) {
-      handleServiceException(key, e);
-      return null; //return null if key not found
+      handleException(e, key);
+      return null;
     }
   }
 
@@ -254,17 +250,19 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   }
 
   /**
-   *
-   * @return
-   * This method returns null if the list could not be populated
-   * due to S3 giving ServiceException
-   * @throws IOException
+   * list objects
+   * @param prefix prefix
+   * @param delimiter delimiter
+   * @param maxListingLength max no. of entries
+   * @param priorLastKey last key in any previous search
+   * @return a list of matches
+   * @throws IOException on any reported failure
    */
 
   private PartialListing list(String prefix, String delimiter,
       int maxListingLength, String priorLastKey) throws IOException {
     try {
-      if (prefix.length() > 0 && !prefix.endsWith(PATH_DELIMITER)) {
+      if (!prefix.isEmpty() && !prefix.endsWith(PATH_DELIMITER)) {
         prefix += PATH_DELIMITER;
       }
       StorageObjectsChunk chunk = s3Service.listObjectsChunked(bucket.getName(),
@@ -279,24 +277,20 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       }
       return new PartialListing(chunk.getPriorLastKey(), fileMetadata,
           chunk.getCommonPrefixes());
-    } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
-      return null; //never returned - keep compiler happy
     } catch (ServiceException e) {
-      handleServiceException(e);
-      return null; //return null if list could not be populated
+      handleException(e, prefix);
+      return null; // never returned - keep compiler happy
     }
   }
 
   @Override
   public void delete(String key) throws IOException {
     try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Deleting key:" + key + "from bucket" + bucket.getName());
-      }
+      LOG.debug("Deleting key: {} from bucket: {}",
+          key, bucket.getName());
       s3Service.deleteObject(bucket, key);
     } catch (ServiceException e) {
-      handleServiceException(key, e);
+      handleException(e, key);
     }
   }
 
@@ -304,7 +298,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
     try {
       s3Service.renameObject(bucket.getName(), srcKey, new S3Object(dstKey));
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcKey);
     }
   }
 
@@ -329,7 +323,7 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       s3Service.copyObject(bucket.getName(), srcKey, bucket.getName(),
          dstObject, false);
     } catch (ServiceException e) {
-      handleServiceException(srcKey, e);
+      handleException(e, srcKey);
     }
   }
 
@@ -364,19 +358,22 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
       Collections.reverse(listedParts);
       s3Service.multipartCompleteUpload(multipartUpload, listedParts);
     } catch (ServiceException e) {
-      handleServiceException(e);
+      handleException(e, srcObject.getKey());
     }
   }
 
   @Override
   public void purge(String prefix) throws IOException {
+    String key = "";
    try {
-      S3Object[] objects = s3Service.listObjects(bucket.getName(), prefix, null);
+      S3Object[] objects =
+          s3Service.listObjects(bucket.getName(), prefix, null);
       for (S3Object object : objects) {
-        s3Service.deleteObject(bucket, object.getKey());
+        key = object.getKey();
+        s3Service.deleteObject(bucket, key);
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e, key);
     }
   }
 
@@ -390,39 +387,97 @@ class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
         sb.append(object.getKey()).append("\n");
       }
     } catch (S3ServiceException e) {
-      handleS3ServiceException(e);
+      handleException(e);
     }
     System.out.println(sb);
   }
 
-  private void handleServiceException(String key, ServiceException e) throws IOException {
-    if ("NoSuchKey".equals(e.getErrorCode())) {
-      throw new FileNotFoundException("Key '" + key + "' does not exist in S3");
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param e exception
+   * @throws IOException exception -always
+   */
+  private void handleException(Exception e) throws IOException {
+    throw processException(e, e, "");
+  }
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param e exception
+   * @param key key sought from object store
+
+   * @throws IOException exception -always
+   */
+  private void handleException(Exception e, String key) throws IOException {
+    throw processException(e, e, key);
+  }
+
+  /**
+   * Handle any service exception by translating it into an IOException
+   * @param thrown exception
+   * @param original original exception -thrown if no other translation could
+   *  be made
+   * @param key key sought from object store or "" for undefined
+   * @return an exception to throw. If isProcessingCause==true this may be null.
+   */
+  private IOException processException(Throwable thrown, Throwable original,
+      String key) {
+    IOException result;
+    if (thrown.getCause() != null) {
+      // recurse down
+      result = processException(thrown.getCause(), original, key);
+    } else if (thrown instanceof HttpException) {
+      // nested HttpException - examine error code and react
+      HttpException httpException = (HttpException) thrown;
+      String responseMessage = httpException.getResponseMessage();
+      int responseCode = httpException.getResponseCode();
+      String bucketName = "s3n://" + bucket.getName();
+      String text = String.format("%s : %03d : %s",
+          bucketName,
+          responseCode,
+          responseMessage);
+      String filename = !key.isEmpty() ? (bucketName + "/" + key) : text;
+      IOException ioe;
+      switch (responseCode) {
+        case 404:
+          result = new FileNotFoundException(filename);
+          break;
+        case 416: // invalid range
+          result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+              +": " + filename);
+          break;
+        case 403: //forbidden
+          result = new AccessControlException("Permission denied"
+              +": " + filename);
+          break;
+        default:
+          result = new IOException(text);
+      }
+      result.initCause(thrown);
+    } else if (thrown instanceof S3ServiceException) {
+      S3ServiceException se = (S3ServiceException) thrown;
+      LOG.debug(
+          "S3ServiceException: {}: {} : {}",
+          se.getS3ErrorCode(), se.getS3ErrorMessage(), se, se);
+      if ("InvalidRange".equals(se.getS3ErrorCode())) {
+        result = new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+      } else {
+        result = new S3Exception(se);
+      }
+    } else if (thrown instanceof ServiceException) {
+      ServiceException se = (ServiceException) thrown;
+      LOG.debug("S3ServiceException: {}: {} : {}",
+          se.getErrorCode(), se.toString(), se, se);
+      result = new S3Exception(se);
+    } else if (thrown instanceof IOException) {
+      result = (IOException) thrown;
     } else {
-      handleServiceException(e);
+      // here there is no exception derived yet.
+      // this means no inner cause, and no translation made yet.
+      // convert the original to an IOException -rather than just the
+      // exception at the base of the tree
+      result = new S3Exception(original);
     }
-  }
-
-  private void handleS3ServiceException(S3ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("S3 Error code: " + e.getS3ErrorCode() + "; S3 Error message: " + e.getS3ErrorMessage());
-      }
-      throw new S3Exception(e);
-    }
-  }
-
-  private void handleServiceException(ServiceException e) throws IOException {
-    if (e.getCause() instanceof IOException) {
-      throw (IOException) e.getCause();
-    }
-    else {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Got ServiceException with Error code: " + e.getErrorCode() + ";and Error message: " + e.getErrorMessage());
-      }
-    }
-  }
+    return result;
+  }
 }
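The jets3t store now funnels every failure through a single handleException/processException path that walks the cause chain and maps HTTP status codes onto filesystem exceptions (404 becomes FileNotFoundException, 416 becomes EOFException, 403 becomes AccessControlException). A simplified sketch of that cause-chain walk, independent of the jets3t types; `statusOf()` is a stand-in for extracting an HTTP status code and is not a real API:

```java
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;

final class ErrorTranslationSketch {
  private ErrorTranslationSketch() {}

  /**
   * Walk the cause chain and convert the innermost recognisable failure into
   * an IOException subclass; otherwise wrap the original exception.
   */
  static IOException translate(Throwable thrown, Throwable original, String key) {
    if (thrown.getCause() != null) {
      return translate(thrown.getCause(), original, key);  // recurse down
    }
    int status = statusOf(thrown);
    switch (status) {
      case 404:
        return new FileNotFoundException(key);
      case 416:
        return new EOFException("Attempted to read past the end of " + key);
      default:
        // no specific mapping: wrap the original, not just the leaf cause
        return new IOException(String.valueOf(original), original);
    }
  }

  private static int statusOf(Throwable t) {
    return -1;   // placeholder: a real store would inspect the service exception
  }
}
```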
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3native;
 
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -37,15 +38,16 @@ import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -55,6 +57,8 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -81,8 +85,8 @@ import org.apache.hadoop.util.Progressable;
 @InterfaceStability.Stable
 public class NativeS3FileSystem extends FileSystem {
 
-  public static final Log LOG =
-      LogFactory.getLog(NativeS3FileSystem.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NativeS3FileSystem.class);
 
   private static final String FOLDER_SUFFIX = "_$folder$";
   static final String PATH_DELIMITER = Path.SEPARATOR;
@@ -97,6 +101,7 @@ public class NativeS3FileSystem extends FileSystem {
     private long pos = 0;
 
     public NativeS3FsInputStream(NativeFileSystemStore store, Statistics statistics, InputStream in, String key) {
+      Preconditions.checkNotNull(in, "Null input stream");
       this.store = store;
       this.statistics = statistics;
       this.in = in;
@@ -105,13 +110,20 @@ public class NativeS3FileSystem extends FileSystem {
 
     @Override
     public synchronized int read() throws IOException {
-      int result = -1;
+      int result;
      try {
        result = in.read();
      } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
-        seek(pos);
-        result = in.read();
+        LOG.info("Received IOException while reading '{}', attempting to reopen",
+            key);
+        LOG.debug("{}", e, e);
+        try {
+          seek(pos);
+          result = in.read();
+        } catch (EOFException eof) {
+          LOG.debug("EOF on input stream read: {}", eof, eof);
+          result = -1;
+        }
      }
      if (result != -1) {
        pos++;
@@ -124,12 +136,17 @@ public class NativeS3FileSystem extends FileSystem {
     @Override
     public synchronized int read(byte[] b, int off, int len)
       throws IOException {
+      if (in == null) {
+        throw new EOFException("Cannot read closed stream");
+      }
       int result = -1;
       try {
         result = in.read(b, off, len);
+      } catch (EOFException eof) {
+        throw eof;
       } catch (IOException e) {
-        LOG.info("Received IOException while reading '" + key + "', attempting to reopen.");
+        LOG.info( "Received IOException while reading '{}'," +
+            " attempting to reopen.", key);
         seek(pos);
         result = in.read(b, off, len);
       }
@@ -143,17 +160,53 @@ public class NativeS3FileSystem extends FileSystem {
     }
 
     @Override
-    public void close() throws IOException {
-      in.close();
+    public synchronized void close() throws IOException {
+      closeInnerStream();
+    }
+
+    /**
+     * Close the inner stream if not null. Even if an exception
+     * is raised during the close, the field is set to null
+     * @throws IOException if raised by the close() operation.
+     */
+    private void closeInnerStream() throws IOException {
+      if (in != null) {
+        try {
+          in.close();
+        } finally {
+          in = null;
+        }
+      }
+    }
+
+    /**
+     * Update inner stream with a new stream and position
+     * @param newStream new stream -must not be null
+     * @param newpos new position
+     * @throws IOException IO exception on a failure to close the existing
+     * stream.
+     */
+    private synchronized void updateInnerStream(InputStream newStream, long newpos) throws IOException {
+      Preconditions.checkNotNull(newStream, "Null newstream argument");
+      closeInnerStream();
+      in = newStream;
+      this.pos = newpos;
     }
 
     @Override
-    public synchronized void seek(long pos) throws IOException {
-      in.close();
-      LOG.info("Opening key '" + key + "' for reading at position '" + pos + "'");
-      in = store.retrieve(key, pos);
-      this.pos = pos;
+    public synchronized void seek(long newpos) throws IOException {
+      if (newpos < 0) {
+        throw new EOFException(
+            FSExceptionMessages.NEGATIVE_SEEK);
+      }
+      if (pos != newpos) {
+        // the seek is attempting to move the current position
+        LOG.debug("Opening key '{}' for reading at position '{}", key, newpos);
+        InputStream newStream = store.retrieve(key, newpos);
+        updateInnerStream(newStream, newpos);
+      }
     }
 
     @Override
     public synchronized long getPos() throws IOException {
       return pos;
@@ -214,7 +267,7 @@ public class NativeS3FileSystem extends FileSystem {
     }
 
       backupStream.close();
-      LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
+      LOG.info("OutputStream for key '{}' closed. Now beginning upload", key);
 
       try {
         byte[] md5Hash = digest == null ? null : digest.digest();
@@ -226,7 +279,7 @@ public class NativeS3FileSystem extends FileSystem {
         super.close();
         closed = true;
       }
-      LOG.info("OutputStream for key '" + key + "' upload complete");
+      LOG.info("OutputStream for key '{}' upload complete", key);
     }
 
     @Override
@@ -339,7 +392,7 @@ public class NativeS3FileSystem extends FileSystem {
       Progressable progress) throws IOException {
 
     if (exists(f) && !overwrite) {
-      throw new IOException("File already exists:"+f);
+      throw new FileAlreadyExistsException("File already exists: " + f);
     }
 
     if(LOG.isDebugEnabled()) {
@@ -367,7 +420,7 @@ public class NativeS3FileSystem extends FileSystem {
     String key = pathToKey(absolutePath);
     if (status.isDirectory()) {
       if (!recurse && listStatus(f).length > 0) {
-        throw new IOException("Can not delete " + f + " at is a not empty directory and recurse option is false");
+        throw new IOException("Can not delete " + f + " as is a not empty directory and recurse option is false");
       }
 
       createParent(f);
@@ -538,7 +591,7 @@ public class NativeS3FileSystem extends FileSystem {
     try {
       FileStatus fileStatus = getFileStatus(f);
       if (fileStatus.isFile()) {
-        throw new IOException(String.format(
+        throw new FileAlreadyExistsException(String.format(
            "Can't make directory for path '%s' since it is a file.", f));

      }
@@ -556,7 +609,7 @@ public class NativeS3FileSystem extends FileSystem {
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     FileStatus fs = getFileStatus(f); // will throw if the file doesn't exist
     if (fs.isDirectory()) {
-      throw new IOException("'" + f + "' is a directory");
+      throw new FileNotFoundException("'" + f + "' is a directory");
     }
     LOG.info("Opening '" + f + "' for reading");
     Path absolutePath = makeAbsolute(f);
@@ -621,7 +621,7 @@ abstract public class Shell {
    * This is an IOException with exit code added.
    */
   public static class ExitCodeException extends IOException {
-    int exitCode;
+    private final int exitCode;
 
     public ExitCodeException(int exitCode, String message) {
       super(message);
@@ -631,6 +631,16 @@ abstract public class Shell {
     public int getExitCode() {
       return exitCode;
     }
+
+    @Override
+    public String toString() {
+      final StringBuilder sb =
+          new StringBuilder("ExitCodeException ");
+      sb.append("exitCode=").append(exitCode)
+        .append(": ");
+      sb.append(super.getMessage());
+      return sb.toString();
+    }
   }
 
   /**
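With the richer `toString()`, a logged `ExitCodeException` now carries the exit code alongside the message. A minimal sketch of what a caller might see; the command and its output are illustrative assumptions, not taken from this commit:

    import java.io.IOException;
    import org.apache.hadoop.util.Shell;

    public class ShellFailureDemo {
      public static void main(String[] args) throws IOException {
        try {
          // a command expected to exit non-zero; the argument is purely illustrative
          Shell.execCommand("false");
        } catch (Shell.ExitCodeException e) {
          // prints something like "ExitCodeException exitCode=1: <stderr of the command>"
          System.err.println(e);
        }
      }
    }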
@@ -0,0 +1,95 @@

# Extending the File System specification and its tests

The FileSystem specification is incomplete. It doesn't cover all operations or
even interfaces and classes in the FileSystem APIs. There may
be some minor issues with those that it does cover, such
as corner cases, failure modes, and other unexpected outcomes. It may also be that
a standard FileSystem significantly diverges from the specification, and
it is felt that this needs to be documented and coped with in tests.

Finally, the FileSystem classes and methods are not fixed forever.
They may be extended with new operations on existing classes, as well as
potentially entirely new classes and interfaces.

Accordingly, do not view this specification as a complete static document,
any more than the rest of the Hadoop code.

1. View it as a live document to accompany the reference implementation (HDFS),
and the tests used to validate filesystems.
1. Don't be afraid to extend or correct it.
1. If you are proposing enhancements to the FileSystem APIs, you should extend the
specification to match.

## How to update this specification

1. Although found in the `hadoop-common` codebase, the HDFS team has ownership of
the FileSystem and FileContext APIs. Work with them on the hdfs-dev mailing list.

1. Create JIRA issues in the `HADOOP` project, component `fs`, to cover changes
in the APIs and/or specification.

1. Code changes will of course require tests. Ideally, changes to the specification
itself are accompanied by new tests.

1. If the change involves operations that already have an `Abstract*ContractTest`,
add new test methods to the class and verify that they work on filesystem-specific
tests that subclass it. That includes the object stores as well as the local and
HDFS filesystems. (A sketch of such a subclass follows this list.)

1. If the changes add a new operation, add a new abstract test class
with the same contract-driven architecture as the existing one, and an implementation
subclass for all filesystems that support the operation.

1. Add test methods to verify that invalid preconditions result in the expected
failures.

1. Add test methods to verify that valid preconditions result in the expected
final state of the filesystem. Testing as little as possible per test aids
in tracking down problems.

1. If possible, add tests to show concurrency expectations.
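As a concrete illustration of the contract-test pattern referenced in the list above, here is a minimal sketch of a filesystem-specific subclass. It assumes the `AbstractContractSeekTest`, `AbstractFSContract` and `LocalFSContract` classes introduced alongside this specification; a real test would extend whichever abstract class matches the operation under test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
    import org.apache.hadoop.fs.contract.AbstractFSContract;
    import org.apache.hadoop.fs.contract.localfs.LocalFSContract;

    // Binds the generic seek contract tests to the local filesystem.
    public class TestLocalFSContractSeek extends AbstractContractSeekTest {
      @Override
      protected AbstractFSContract createContract(Configuration conf) {
        return new LocalFSContract(conf);
      }
    }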
If a FileSystem fails a newly added test, then it may be because:

* The specification is wrong.
* The test is wrong.
* The test is looking for the wrong exception (i.e. it is too strict).
* The specification and tests are correct, and it is the filesystem that is not
consistent with expectations.

HDFS has to be treated as correct in its behavior.
If the test and specification do not match this behavior, then the specification
needs to be updated. Even so, there may be cases where the FS could be changed:

1. The exception raised is a generic `IOException`, when a more informative
subclass, such as `EOFException`, can be raised.
1. The FileSystem does not fail correctly when passed an invalid set of arguments.
This MAY be correctable, though must be done cautiously.

If the mismatch is in LocalFileSystem, then it probably can't be corrected, as
this is the native filesystem as accessed via the Java IO APIs.

For other FileSystems, their behaviour MAY be updated to more accurately reflect
the behavior of HDFS and/or LocalFileSystem. For most operations this is straightforward,
though the semantics of `rename()` are complicated enough that it is not clear
that HDFS is the correct reference.

If a test fails and it is felt that it is an unfixable FileSystem-specific issue, then
a new contract option to allow for different interpretations of the results should
be added to the `ContractOptions` interface, the test modified to react to the
presence/absence of the option, and the XML contract files for the standard
FileSystems updated to indicate when a feature/failure mode is present.

@@ -0,0 +1,802 @@

<!-- ============================================================= -->
<!-- CLASS: FileSystem -->
<!-- ============================================================= -->

# class `org.apache.hadoop.fs.FileSystem`

The abstract `FileSystem` class is the original class to access Hadoop filesystems;
non-abstract subclasses exist for all Hadoop-supported filesystems.

All operations that take a Path to this interface MUST support relative paths.
In such a case, they must be resolved relative to the working directory
defined by `setWorkingDirectory()`.

For all clients, therefore, we also add the notion of a state component PWD:
this represents the present working directory of the client. Changes to this
state are not reflected in the filesystem itself: they are unique to the instance
of the client.
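A minimal sketch of the working-directory rule above, with hypothetical paths; the relative path resolves against whatever PWD the client last set:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WorkingDirDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        fs.setWorkingDirectory(new Path("/user/alice"));
        // "data/part-0000" resolves against PWD, i.e. /user/alice/data/part-0000
        FileStatus status = fs.getFileStatus(new Path("data/part-0000"));
        System.out.println(status.getPath());
      }
    }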
**Implementation Note**: the static `FileSystem get(URI uri, Configuration conf)` method MAY return
a pre-existing instance of a filesystem client class—a class that may also be in use in other threads. The implementations of `FileSystem` which ship with Apache Hadoop *do not make any attempt to synchronize access to the working directory field*.

## Invariants

All the requirements of a valid FileSystem are considered implicit preconditions and postconditions:
all operations on a valid FileSystem MUST result in a new FileSystem that is also valid.


## Predicates and other state access operations


### `boolean exists(Path p)`

    def exists(FS, p) = p in paths(FS)


### `boolean isDirectory(Path p)`

    def isDirectory(FS, p) = p in directories(FS)


### `boolean isFile(Path p)`

    def isFile(FS, p) = p in files(FS)


### `boolean isSymlink(Path p)`

    def isSymlink(FS, p) = p in symlinks(FS)


### `FileStatus getFileStatus(Path p)`

Get the status of a path

#### Preconditions

    if not exists(FS, p) : raise FileNotFoundException

#### Postconditions

    result = stat: FileStatus where:
        if isFile(FS, p) :
            stat.length = len(FS.Files[p])
            stat.isdir = False
        elif isDir(FS, p) :
            stat.length = 0
            stat.isdir = True
        elif isSymlink(FS, p) :
            stat.length = 0
            stat.isdir = False
            stat.symlink = FS.Symlinks[p]
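A short, illustrative use of `getFileStatus` reflecting the model above; the path is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class StatDemo {
      // prints the fields the postconditions above describe
      static void stat(FileSystem fs) throws IOException {
        FileStatus st = fs.getFileStatus(new Path("/data/part-0000"));
        System.out.println(st.getPath() + " isDirectory=" + st.isDirectory()
            + " length=" + st.getLen());
      }
    }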
### `Path getHomeDirectory()`

The function `getHomeDirectory` returns the home directory for the FileSystem
and the current user account.

For some FileSystems, the path is `["/", "users", System.getProperty("user-name")]`.

However, for HDFS, the username is derived from the credentials used to authenticate the client with HDFS. This
may differ from the local user account name.

**It is the responsibility of the FileSystem to determine the actual home directory
of the caller.**


#### Preconditions

#### Postconditions

    result = p where valid-path(FS, p)

There is no requirement that the path exists at the time the method was called,
or, if it exists, that it points to a directory. However, code tends to assume
that `not isFile(FS, getHomeDirectory())` holds to the extent that follow-on
code may fail.

#### Implementation Notes

* The FTPFileSystem queries this value from the remote filesystem and may
fail with a RuntimeException or subclass thereof if there is a connectivity
problem. The time to execute the operation is not bounded.

### `FileSystem.listStatus(Path, PathFilter)`

A `PathFilter` `f` is a predicate function that returns true iff the path `p`
meets the filter's conditions.

#### Preconditions

Path must exist:

    if not exists(FS, p) : raise FileNotFoundException

#### Postconditions

    if isFile(FS, p) and f(p) :
        result = [getFileStatus(p)]

    elif isFile(FS, p) and not f(P) :
        result = []

    elif isDir(FS, p):
        result = [getFileStatus(c) for c in children(FS, p) where f(c) == True]


**Implicit invariant**: the contents of a `FileStatus` of a child retrieved
via `listStatus()` are equal to those from a call of `getFileStatus()`
to the same path:

    forall fs in listStatus(Path) :
        fs == getFileStatus(fs.path)
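A hedged sketch of the operation with an explicit `PathFilter`; the directory layout and filename prefix are assumptions made for the example:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public final class ListPartsDemo {
      static void listParts(FileSystem fs) throws IOException {
        PathFilter partFiles = new PathFilter() {
          @Override
          public boolean accept(Path path) {
            return path.getName().startsWith("part-");
          }
        };
        // only children accepted by the filter are returned
        for (FileStatus st : fs.listStatus(new Path("/data/output"), partFiles)) {
          System.out.println(st.getPath() + " " + st.getLen());
        }
      }
    }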
### Atomicity and Consistency

By the time the `listStatus()` operation returns to the caller, there
is no guarantee that the information contained in the response is current.
The details MAY be out of date, including the contents of any directory, the
attributes of any files, and the existence of the path supplied.

The state of a directory MAY change during the evaluation
process. This may be reflected in a listing that is split between the pre-
and post-update FileSystem states.

* After an entry at path `P` is created, and before any other
changes are made to the FileSystem, `listStatus(P)` MUST
find the file and return its status.

* After an entry at path `P` is deleted, `listStatus(P)` MUST
raise a `FileNotFoundException`.

* After an entry at path `P` is created, and before any other
changes are made to the FileSystem, the result of `listStatus(parent(P))` SHOULD
include the value of `getFileStatus(P)`.

* After an entry at path `P` is deleted, and before any other
changes are made to the FileSystem, the result of `listStatus(parent(P))` SHOULD
NOT include the value of `getFileStatus(P)`.

This is not a theoretical possibility, it is observable in HDFS when a
directory contains many thousands of files.

Consider a directory "d" with the contents:

    a
    part-0000001
    part-0000002
    ...
    part-9999999

If the number of files is such that HDFS returns a partial listing in each
response, then, if a listing `listStatus("d")` takes place concurrently with the operation
`rename("d/a","d/z")`, the result may be one of:

    [a, part-0000001, ... , part-9999999]
    [part-0000001, ... , part-9999999, z]

    [a, part-0000001, ... , part-9999999, z]
    [part-0000001, ... , part-9999999]

While this situation is likely to be a rare occurrence, it MAY happen. In HDFS
these inconsistent views are only likely when listing a directory with many children.

Other filesystems may have stronger consistency guarantees, or return inconsistent
data more readily.

### `List[BlockLocation] getFileBlockLocations(FileStatus f, int s, int l)`

#### Preconditions

    if s < 0 or l < 0 : raise {HadoopIllegalArgumentException, InvalidArgumentException}

* HDFS throws `HadoopIllegalArgumentException` for an invalid offset
or length; this extends `IllegalArgumentException`.

#### Postconditions

If the filesystem is location aware, it must return the list
of block locations where the data in the range `[s:s+l]` can be found.

    if f == null :
        result = null
    elif f.getLen() <= s :
        result = []
    else result = [ locations(FS, b) for all b in blocks(FS, p, s, s+l)]

where

      def locations(FS, b) = a list of all locations of a block in the filesystem

      def blocks(FS, p, s, s + l) = a list of the blocks containing data(FS, path)[s:s+l]


Note that as `length(FS, f)` is defined as 0 if `isDir(FS, f)`, the result
of `getFileBlockLocations()` on a directory is []


If the filesystem is not location aware, it SHOULD return

      [
        BlockLocation(["localhost:50010"] ,
          ["localhost"],
          ["/default/localhost"]
           0, F.getLen())
       ] ;


*A bug in Hadoop 1.0.3 means that a topology path of the same number
of elements as the cluster topology MUST be provided, hence Filesystems SHOULD
return that `"/default/localhost"` path.*
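An illustrative query of block locations over the first gigabyte of a hypothetical file; on a location-aware filesystem the hosts come back per block:

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class BlockLocationsDemo {
      static void printLocations(FileSystem fs) throws IOException {
        FileStatus stat = fs.getFileStatus(new Path("/data/large.bin"));
        BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, 1024L * 1024 * 1024);
        for (BlockLocation b : blocks) {
          // each entry covers one block of the requested range
          System.out.println(b.getOffset() + "+" + b.getLength()
              + " on " + Arrays.toString(b.getHosts()));
        }
      }
    }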
### `getFileBlockLocations(Path P, int S, int L)`

#### Preconditions

    if p == null : raise NullPointerException
    if not exists(FS, p) : raise FileNotFoundException


#### Postconditions

    result = getFileBlockLocations(getStatus(P), S, L)


### `getDefaultBlockSize()`

#### Preconditions

#### Postconditions

    result = integer >= 0

Although there is no defined minimum value for this result, as it
is used to partition work during job submission, a block size
that is too small will result in either too many jobs being submitted
for efficient work, or the `JobSubmissionClient` running out of memory.


Any FileSystem that does not actually break files into blocks SHOULD
return a number for this that results in efficient processing.
A FileSystem MAY make this user-configurable (the S3 and Swift filesystem clients do this).

### `getDefaultBlockSize(Path P)`

#### Preconditions


#### Postconditions

    result = integer >= 0

The outcome of this operation is usually identical to `getDefaultBlockSize()`,
with no checks for the existence of the given path.

Filesystems that support mount points may have different default values for
different paths, in which case the specific default value for the destination path
SHOULD be returned.


### `getBlockSize(Path P)`

#### Preconditions

    if not exists(FS, p) : raise FileNotFoundException


#### Postconditions

    result == getFileStatus(P).getBlockSize()

The outcome of this operation MUST be identical to that contained in
the `FileStatus` returned from `getFileStatus(P)`.


## State Changing Operations

### `boolean mkdirs(Path p, FsPermission permission)`

Create a directory and all its parents

#### Preconditions

     if exists(FS, p) and not isDir(FS, p) :
         raise [ParentNotDirectoryException, FileAlreadyExistsException, IOException]


#### Postconditions

    FS' where FS'.Directories' = FS.Directories + [p] + ancestors(FS, p)
    result = True


The condition exclusivity requirement of a FileSystem's directories,
files and symbolic links must hold.

The probe for the existence and type of a path and directory creation MUST be
atomic. The combined operation, including `mkdirs(parent(F))` MAY be atomic.

The return value is always true—even if a new directory is not created
(this is defined in HDFS).

#### Implementation Notes: Local FileSystem

The local FileSystem does not raise an exception if `mkdirs(p)` is invoked
on a path that exists and is a file. Instead the operation returns false.

    if isFile(FS, p):
       FS' = FS
       result = False

### `FSDataOutputStream create(Path, ...)`


    FSDataOutputStream create(Path p,
        FsPermission permission,
        boolean overwrite,
        int bufferSize,
        short replication,
        long blockSize,
        Progressable progress) throws IOException;


#### Preconditions

The file must not exist for a no-overwrite create:

    if not overwrite and isFile(FS, p) : raise FileAlreadyExistsException

Writing to or overwriting a directory must fail.

    if isDir(FS, p) : raise {FileAlreadyExistsException, FileNotFoundException, IOException}


FileSystems may reject the request for other
reasons, such as the FS being read-only (HDFS),
the block size being below the minimum permitted (HDFS),
the replication count being out of range (HDFS),
quotas on namespace or filesystem being exceeded, reserved
names, etc. All rejections SHOULD be `IOException` or a subclass thereof
and MAY be a `RuntimeException` or subclass. For instance, HDFS may raise an `InvalidPathException`.

#### Postconditions

    FS' where :
       FS'.Files'[p] == []
       ancestors(p) is-subset-of FS'.Directories'

    result = FSDataOutputStream

The updated (valid) FileSystem must contain all the parent directories of the path, as created by `mkdirs(parent(p))`.

The result is `FSDataOutputStream`, which through its operations may generate new filesystem states with updated values of
`FS.Files[p]`

#### Implementation Notes

* Some implementations split the create into a check for the file existing
from the actual creation. This means the operation is NOT atomic: it is possible for
clients creating files with `overwrite==true` to fail if the file is created
by another client between the two tests.

* S3N, Swift and potentially other Object Stores do not currently change the FS state
until the output stream `close()` operation is completed.
This MAY be a bug, as it allows >1 client to create a file with `overwrite==false`,
and potentially confuse file/directory logic

* The Local FileSystem raises a `FileNotFoundException` when trying to create a file over
a directory, hence it is listed as an exception that MAY be raised when
this precondition fails.

* Not covered: symlinks. The resolved path of the symlink is used as the final path argument to the `create()` operation
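A brief sketch of a no-overwrite create against a hypothetical path; per the precondition above, repeating the call with `overwrite == false` while the file exists is expected to raise `FileAlreadyExistsException`:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CreateDemo {
      static void writeReport(FileSystem fs) throws IOException {
        // overwrite == false: fails if /data/report.csv already exists
        FSDataOutputStream out = fs.create(new Path("/data/report.csv"), false);
        try {
          out.writeBytes("id,value\n");
        } finally {
          out.close();
        }
      }
    }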
### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`

Implementations MAY throw `UnsupportedOperationException`.

#### Preconditions

    if not exists(FS, p) : raise FileNotFoundException

    if not isFile(FS, p) : raise [FileNotFoundException, IOException]

#### Postconditions

    FS
    result = FSDataOutputStream

Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]`
by appending data to the existing list.


### `FSDataInputStream open(Path f, int bufferSize)`

Implementations MAY throw `UnsupportedOperationException`.

#### Preconditions

    if not isFile(FS, p) : raise [FileNotFoundException, IOException]

This is a critical precondition. Implementations of some FileSystems (e.g.
Object stores) could shortcut one round trip by postponing their HTTP GET
operation until the first `read()` on the returned `FSDataInputStream`.
However, much client code does depend on the existence check being performed
at the time of the `open()` operation. Implementations MUST check for the
presence of the file at the time of creation. This does not imply that
the file and its data are still present at the time of the following `read()` or
any successors.

#### Postconditions

    result = FSDataInputStream(0, FS.Files[p])

The result provides access to the byte array defined by `FS.Files[p]`; whether that
access is to the contents at the time the `open()` operation was invoked,
or whether and how it may pick up changes to that data in later states of FS is
an implementation detail.

The result MUST be the same for local and remote callers of the operation.


#### HDFS implementation notes

1. HDFS MAY throw `UnresolvedPathException` when attempting to traverse
symbolic links

1. HDFS throws `IOException("Cannot open filename " + src)` if the path
exists in the metadata, but no copies of any of its blocks can be located;
-`FileNotFoundException` would seem more accurate and useful.


### `FileSystem.delete(Path P, boolean recursive)`

#### Preconditions

A directory with children and recursive == false cannot be deleted

    if isDir(FS, p) and not recursive and (children(FS, p) != {}) : raise IOException


#### Postconditions


##### Nonexistent path

If the file does not exist the FS state does not change

    if not exists(FS, p):
        FS' = FS
        result = False

The result SHOULD be `False`, indicating that no file was deleted.


##### Simple File

A path referring to a file is removed, return value: `True`

    if isFile(FS, p) :
        FS' = (FS.Directories, FS.Files - [p], FS.Symlinks)
        result = True


##### Empty root directory

Deleting an empty root does not change the filesystem state
and may return true or false.

    if isDir(FS, p) and isRoot(p) and children(FS, p) == {} :
        FS' = FS
        result = (undetermined)

There is no consistent return code from an attempt to delete the root directory.

##### Empty (non-root) directory

Deleting an empty directory that is not root will remove the path from the FS and
return true.

    if isDir(FS, p) and not isRoot(p) and children(FS, p) == {} :
        FS' = (FS.Directories - [p], FS.Files, FS.Symlinks)
        result = True


##### Recursive delete of root directory

Deleting a root path with children and `recursive==True`
can do one of two things.

The POSIX model assumes that if the user has
the correct permissions to delete everything,
they are free to do so (resulting in an empty filesystem).

    if isDir(FS, p) and isRoot(p) and recursive :
        FS' = ({["/"]}, {}, {}, {})
        result = True

In contrast, HDFS never permits the deletion of the root of a filesystem; the
filesystem can be taken offline and reformatted if an empty
filesystem is desired.

    if isDir(FS, p) and isRoot(p) and recursive :
        FS' = FS
        result = False

##### Recursive delete of non-root directory

Deleting a non-root path with children `recursive==true`
removes the path and all descendants

    if isDir(FS, p) and not isRoot(p) and recursive :
        FS' where:
            not isDir(FS', p)
            and forall d in descendants(FS, p):
                not isDir(FS', d)
                not isFile(FS', d)
                not isSymlink(FS', d)
        result = True

#### Atomicity

* Deleting a file MUST be an atomic action.

* Deleting an empty directory MUST be an atomic action.

* A recursive delete of a directory tree MUST be atomic.

#### Implementation Notes

* S3N, Swift, FTP and potentially other non-traditional FileSystems
implement `delete()` as recursive listing and file delete operation.
This can break the expectations of client applications -and means that
they cannot be used as drop-in replacements for HDFS.
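A short illustrative call matching the rules above; the directory path is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class DeleteDemo {
      static void cleanup(FileSystem fs) throws IOException {
        Path dir = new Path("/tmp/job-output");
        // recursive == true is required for a non-empty directory;
        // recursive == false on such a directory raises IOException
        boolean deleted = fs.delete(dir, true);
        System.out.println(dir + " deleted=" + deleted);
      }
    }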
<!-- ============================================================= -->
<!-- METHOD: rename() -->
<!-- ============================================================= -->


### `FileSystem.rename(Path src, Path d)`

In terms of its specification, `rename()` is one of the most complex operations within a filesystem.

In terms of its implementation, it is the one with the most ambiguity regarding when to return false
versus raising an exception.

Rename includes the calculation of the destination path.
If the destination exists and is a directory, the final destination
of the rename becomes the destination + the filename of the source path.

    let dest = if (isDir(FS, d) and d != src) :
            d + [filename(src)]
        else :
            d

#### Preconditions

All checks on the destination path MUST take place after the final `dest` path
has been calculated.

Source `src` must exist:

    exists(FS, src) else raise FileNotFoundException


`dest` cannot be a descendant of `src`:

    if isDescendant(FS, src, dest) : raise IOException

This implicitly covers the special case of `isRoot(FS, src)`.

`dest` must be root, or have a parent that exists:

    isRoot(FS, dest) or exists(FS, parent(dest)) else raise IOException

The parent path of a destination must not be a file:

    if isFile(FS, parent(dest)) : raise IOException

This implicitly covers all the ancestors of the parent.

There must not be an existing file at the end of the destination path:

    if isFile(FS, dest) : raise FileAlreadyExistsException, IOException


#### Postconditions


##### Renaming a directory onto itself

Renaming a directory onto itself is a no-op; the return value is not specified.

In POSIX the result is `False`; in HDFS the result is `True`.

    if isDir(FS, src) and src == dest :
        FS' = FS
        result = (undefined)


##### Renaming a file to self

Renaming a file to itself is a no-op; the result is `True`.

     if isFile(FS, src) and src == dest :
         FS' = FS
         result = True


##### Renaming a file onto a nonexistent path

Renaming a file where the destination is a directory moves the file as a child
of the destination directory, retaining the filename element of the source path.

    if isFile(FS, src) and src != dest:
        FS' where:
            not exists(FS', src)
            and exists(FS', dest)
            and data(FS', dest) == data(FS, src)
        result = True


##### Renaming a directory onto a directory

If `src` is a directory then all its children will then exist under `dest`, while the path
`src` and its descendants will no longer exist. The names of the paths under
`dest` will match those under `src`, as will the contents:

    if isDir(FS, src) and isDir(FS, dest) and src != dest :
        FS' where:
            not exists(FS', src)
            and dest in FS'.Directories
            and forall c in descendants(FS, src) :
                not exists(FS', c)
            and forall c in descendants(FS, src) where isDir(FS, c):
                isDir(FS', dest + childElements(src, c))
            and forall c in descendants(FS, src) where not isDir(FS, c):
                data(FS', dest + childElements(s, c)) == data(FS, c)
        result = True

##### Renaming into a path where the parent path does not exist

      not exists(FS, parent(dest))

There is no consistent behavior here.

*HDFS*

The outcome is no change to FileSystem state, with a return value of false.

    FS' = FS; result = False

*Local Filesystem, S3N*

The outcome is as a normal rename, with the additional (implicit) feature
that the parent directories of the destination also exist

    exists(FS', parent(dest))

*Other Filesystems (including Swift)*

Other filesystems strictly reject the operation, raising a `FileNotFoundException`

##### Concurrency requirements

* The core operation of `rename()`—moving one entry in the filesystem to
another—MUST be atomic. Some applications rely on this as a way to coordinate access to data.

* Some FileSystem implementations perform checks on the destination
FileSystem before and after the rename. One example of this is `ChecksumFileSystem`, which
provides checksummed access to local data. The entire sequence MAY NOT be atomic.

##### Implementation Notes

**Files open for reading, writing or appending**

The behavior of `rename()` on an open file is unspecified: whether it is
allowed, and what happens to later attempts to read from or write to the open stream.

**Renaming a directory onto itself**

The return code of renaming a directory onto itself is unspecified.

**Destination exists and is a file**

Renaming a file atop an existing file is specified as failing, raising an exception.

* Local FileSystem : the rename succeeds; the destination file is replaced by the source file.

* HDFS : The rename fails, no exception is raised. Instead the method call simply returns false.

**Missing source file**

If the source file `src` does not exist, `FileNotFoundException` should be raised.

HDFS fails without raising an exception; `rename()` merely returns false.

    FS' = FS
    result = false

The behavior of HDFS here should not be considered a feature to replicate.
`FileContext` explicitly changed the behavior to raise an exception, and the retrofitting of that action
to the `DFSFileSystem` implementation is an ongoing matter for debate.
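An illustrative rename into an existing directory, matching the destination-path calculation above; both paths are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class RenameDemo {
      static void archive(FileSystem fs) throws IOException {
        // /archive exists and is a directory, so the final destination
        // becomes /archive/report.csv per the dest calculation above
        boolean renamed = fs.rename(new Path("/user/alice/report.csv"),
                                    new Path("/archive"));
        System.out.println("renamed=" + renamed);
      }
    }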
### `concat(Path p, Path sources[])`

Joins multiple blocks together to create a single file. This
is a little-used operation currently implemented only by HDFS.

Implementations MAY throw `UnsupportedOperationException`

#### Preconditions

    if not exists(FS, p) : raise FileNotFoundException

    if sources==[] : raise IllegalArgumentException

All sources MUST be in the same directory:

    for s in sources: if parent(S) != parent(p) raise IllegalArgumentException

All block sizes must match that of the target:

    for s in sources: getBlockSize(FS, S) == getBlockSize(FS, p)

No duplicate paths:

    not (exists p1, p2 in (sources + [p]) where p1 == p2)

HDFS: All source files except the final one MUST be a complete block:

    for s in (sources[0:length(sources)-1] + [p]):
        (length(FS, s) mod getBlockSize(FS, p)) == 0


#### Postconditions

    FS' where:
     (data(FS', T) = data(FS, T) + data(FS, sources[0]) + ... + data(FS, srcs[length(srcs)-1]))
     and for s in srcs: not exists(FS', S)


HDFS's restrictions may be an implementation detail of how it implements
`concat` -by changing the inode references to join them together in
a sequence. As no other filesystem in the Hadoop core codebase
implements this method, there is no way to distinguish implementation detail
from specification.

@@ -0,0 +1,379 @@

<!-- ============================================================= -->
<!-- CLASS: FSDataInputStream -->
<!-- ============================================================= -->


# Class `FSDataInputStream extends DataInputStream`

The core behavior of `FSDataInputStream` is defined by `java.io.DataInputStream`,
with extensions that add key assumptions to the system.

1. The source is a local or remote filesystem.
1. The stream being read references a finite array of bytes.
1. The length of the data does not change during the read process.
1. The contents of the data do not change during the process.
1. The source file remains present during the read process.
1. Callers may use `Seekable.seek()` to offsets within the array of bytes, with future
reads starting at this offset.
1. The cost of forward and backward seeks is low.
1. There is no requirement for the stream implementation to be thread-safe.
Callers MUST assume that instances are not thread-safe.


Files are opened via `FileSystem.open(p)`, which, if successful, returns:

    result = FSDataInputStream(0, FS.Files[p])

The stream can be modeled as:

    FSDIS = (pos, data[], isOpen)

with access functions:

    pos(FSDIS)
    data(FSDIS)
    isOpen(FSDIS)

**Implicit invariant**: the size of the data stream equals the size of the
file as returned by `FileSystem.getFileStatus(Path p)`

    forall p in dom(FS.Files[p]) :
    len(data(FSDIS)) == FS.getFileStatus(p).length


### `Closeable.close()`

The semantics of `java.io.Closeable` are defined in the interface definition
within the JRE.

The operation MUST be idempotent; the following sequence is not an error:

    FSDIS.close();
    FSDIS.close();

#### Implementation Notes

* Implementations SHOULD be robust against failure. If an inner stream
is closed, it should be checked for being `null` first.

* Implementations SHOULD NOT raise `IOException` exceptions (or any other exception)
during this operation. Client applications often ignore these, or may fail
unexpectedly.


#### Postconditions

    FSDIS' = ((undefined), (undefined), False)


### `Seekable.getPos()`

Return the current position. The outcome when a stream is closed is undefined.

#### Preconditions

    isOpen(FSDIS)

#### Postconditions

    result = pos(FSDIS)


### `InputStream.read()`

Return the data at the current position.

1. Implementations should fail when a stream is closed
1. There is no limit on how long `read()` may take to complete.

#### Preconditions

    isOpen(FSDIS)

#### Postconditions

    if ( pos < len(data) ):
       FSDIS' = (pos + 1, data, True)
       result = data[pos]
    else
        result = -1


### `InputStream.read(buffer[], offset, length)`

Read `length` bytes of data into the destination buffer, starting at offset
`offset`

#### Preconditions

    isOpen(FSDIS)
    buffer != null else raise NullPointerException
    length >= 0
    offset < len(buffer)
    length <= len(buffer) - offset

Exceptions that may be raised on precondition failure are

    InvalidArgumentException
    ArrayIndexOutOfBoundsException
    RuntimeException

#### Postconditions

    if length == 0 :
      result = 0

    elseif pos > len(data):
      result = -1

    else
      let l = min(length, len(data)-length) :
      buffer' = buffer where forall i in [0..l-1]:
          buffer'[o+i] = data[pos+i]
      FSDIS' = (pos+l, data, true)
      result = l

### `Seekable.seek(s)`


#### Preconditions

Not all subclasses implement the Seek operation:

    supported(FSDIS, Seekable.seek) else raise [UnsupportedOperationException, IOException]

If the operation is supported, the file SHOULD be open:

    isOpen(FSDIS)

Some filesystems do not perform this check, relying on the `read()` contract
to reject reads on a closed stream (e.g. `RawLocalFileSystem`).

A `seek(0)` MUST always succeed, as the seek position must be
positive and less than the length of the stream:

    s > 0 and ((s==0) or ((s < len(data)))) else raise [EOFException, IOException]

Some FileSystems do not raise an exception if this condition is not met. They
instead return -1 on any `read()` operation where, at the time of the read,
`len(data(FSDIS)) < pos(FSDIS)`.

#### Postconditions

    FSDIS' = (s, data, True)

There is an implicit invariant: a seek to the current position is a no-op

    seek(getPos())

Implementations may recognise this operation and bypass all other precondition
checks, leaving the input stream unchanged.
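A hedged sketch of opening a stream, seeking and reading; the path and offset are illustrative. Per the preconditions above, a negative seek is expected to raise `EOFException`:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class SeekDemo {
      static void readAt(FileSystem fs) throws IOException {
        FSDataInputStream in = fs.open(new Path("/data/large.bin"));
        try {
          in.seek(1024);                 // position the stream at offset 1024
          int b = in.read();             // read one byte at that offset, or -1 at EOF
          System.out.println("pos=" + in.getPos() + " byte=" + b);
        } finally {
          in.close();                    // close() must be idempotent
        }
      }
    }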
### `Seekable.seekToNewSource(offset)`

This operation instructs the source to retrieve `data[]` from a different
source from the current source. This is only relevant if the filesystem supports
multiple replicas of a file and there is more than 1 replica of the
data at offset `offset`.


#### Preconditions

Not all subclasses implement this operation, and instead
either raise an exception or return `False`.

    supported(FSDIS, Seekable.seekToNewSource) else raise [UnsupportedOperationException, IOException]

Examples: `CompressionInputStream` , `HttpFSFileSystem`

If supported, the file must be open:

    isOpen(FSDIS)

#### Postconditions

The majority of subclasses that do not implement this operation simply
fail.

    if not supported(FSDIS, Seekable.seekToNewSource(s)):
        result = False

Examples: `RawLocalFileSystem` , `HttpFSFileSystem`

If the operation is supported and there is a new location for the data:

        FSDIS' = (pos, data', true)
        result = True

The new data is the original data (or an updated version of it, as covered
in the Consistency section below), but the block containing the data at `offset`
sourced from a different replica.

If there is no other copy, `FSDIS` is not updated; the response indicates this:

        result = False

Outside of test methods, the primary use of this method is in the `FSInputChecker`
class, which can react to a checksum error in a read by attempting to source
the data elsewhere. If a new source can be found it attempts to reread and
recheck that portion of the file.

## interface `PositionedReadable`

The `PositionedReadable` operations provide the ability to
read data into a buffer from a specific position in
the data stream.

Although the interface declares that it must be thread safe,
some of the implementations do not follow this guarantee.

#### Implementation preconditions

Not all `FSDataInputStream` implementations support these operations. Those that do
not implement `Seekable.seek()` do not implement the `PositionedReadable`
interface.

    supported(FSDIS, Seekable.seek) else raise [UnsupportedOperationException, IOException]

This could be considered obvious: if a stream is not Seekable, a client
cannot seek to a location. It is also a side effect of the
base class implementation, which uses `Seekable.seek()`.


**Implicit invariant**: for all `PositionedReadable` operations, the value
of `pos` is unchanged at the end of the operation

    pos(FSDIS') == pos(FSDIS)


There are no guarantees that this holds *during* the operation.


#### Failure states

For any operations that fail, the contents of the destination
`buffer` are undefined. Implementations may overwrite part
or all of the buffer before reporting a failure.


### `int PositionedReadable.read(position, buffer, offset, length)`

#### Preconditions

    position > 0 else raise [IllegalArgumentException, RuntimeException]
    len(buffer) + offset < len(data) else raise [IndexOutOfBoundException, RuntimeException]
    length >= 0
    offset >= 0

#### Postconditions

The amount of data read is the less of the length or the amount
of data available from the specified position:

    let available = min(length, len(data)-position)
    buffer'[offset..(offset+available-1)] = data[position..position+available -1]
    result = available


### `void PositionedReadable.readFully(position, buffer, offset, length)`

#### Preconditions

    position > 0 else raise [IllegalArgumentException, RuntimeException]
    length >= 0
    offset >= 0
    (position + length) <= len(data) else raise [EOFException, IOException]
    len(buffer) + offset < len(data)

#### Postconditions

The amount of data read is the less of the length or the amount
of data available from the specified position:

    let available = min(length, len(data)-position)
    buffer'[offset..(offset+length-1)] = data[position..(position + length -1)]

### `PositionedReadable.readFully(position, buffer)`

The semantics of this are exactly equivalent to

    readFully(position, buffer, 0, len(buffer))
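An illustrative positioned read of a hypothetical 16-byte header; unlike `seek()` followed by `read()`, the position reported by `getPos()` is unchanged afterwards:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class PositionedReadDemo {
      static void readHeader(FileSystem fs) throws IOException {
        byte[] header = new byte[16];
        FSDataInputStream in = fs.open(new Path("/data/large.bin"));
        try {
          in.readFully(0, header);       // fills all 16 bytes or throws EOFException
          System.out.println("pos after readFully = " + in.getPos());  // still 0
        } finally {
          in.close();
        }
      }
    }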
## Consistency

* All readers, local and remote, of a data stream FSDIS provided from a `FileSystem.open(p)`
are expected to receive access to the data of `FS.Files[p]` at the time of opening.
* If the underlying data is changed during the read process, these changes MAY or
MAY NOT be visible.
* Such changes that are visible MAY be partially visible.


At time t0

    FSDIS0 = FS'read(p) = (0, data0[])

At time t1

    FS' = FS' where FS'.Files[p] = data1

From time `t >= t1`, the value of `FSDIS0` is undefined.

It may be unchanged

    FSDIS0.data == data0

    forall l in len(FSDIS0.data):
      FSDIS0.read() == data0[l]


It may pick up the new data

    FSDIS0.data == data1

    forall l in len(FSDIS0.data):
      FSDIS0.read() == data1[l]

It may be inconsistent, such that a read of an offset returns
data from either of the datasets

    forall l in len(FSDIS0.data):
      (FSDIS0.read(l) == data0[l]) or (FSDIS0.read(l) == data1[l])

That is, every value read may be from the original or updated file.

It may also be inconsistent on repeated reads of same offset, that is
at time `t2 > t1`:

    r2 = FSDIS0.read(l)

While at time `t3 > t2`:

    r3 = FSDIS0.read(l)

It may be that `r3 != r2`. (That is, some of the data may be cached or replicated,
and on a subsequent read, a different version of the file's contents are returned).


Similarly, if the data at the path `p`, is deleted, this change MAY or MAY
not be visible during read operations performed on `FSDIS0`.

@@ -0,0 +1,37 @@

# The Hadoop FileSystem API Definition

This is a specification of the Hadoop FileSystem APIs, which models
the contents of a filesystem as a set of paths that are either directories,
symbolic links, or files.

There is surprisingly little prior art in this area. There are multiple specifications of
Unix filesystems as a tree of inodes, but nothing public which defines the
notion of "Unix filesystem as a conceptual model for data storage access".

This specification attempts to do that; to define the Hadoop FileSystem model
and APIs so that multiple filesystems can implement the APIs and present a consistent
model of their data to applications. It does not attempt to formally specify any of the
concurrency behaviors of the filesystems, other than to document the behaviours exhibited by
HDFS as these are commonly expected by Hadoop client applications.

1. [Introduction](introduction.html)
1. [Notation](notation.html)
1. [Model](model.html)
1. [FileSystem class](filesystem.html)
1. [FSDataInputStream class](fsdatainputstream.html)
2. [Testing with the Filesystem specification](testing.html)
2. [Extending the specification and its tests](extending.html)

@@ -0,0 +1,377 @@

# Introduction
|
||||||
|
|
||||||
|
This document defines the required behaviors of a Hadoop-compatible filesystem
|
||||||
|
for implementors and maintainers of the Hadoop filesystem, and for users of
|
||||||
|
the Hadoop FileSystem APIs.
|
||||||
|
|
||||||
|
Most of the Hadoop operations are tested against HDFS in the Hadoop test
|
||||||
|
suites, initially through `MiniDFSCluster`, before release by vendor-specific
|
||||||
|
'production' tests, and implicitly by the Hadoop stack above it.
|
||||||
|
|
||||||
|
HDFS's actions have been modeled on POSIX filesystem behavior, using the actions and
|
||||||
|
return codes of Unix filesystem actions as a reference. Even so, there
|
||||||
|
are places where HDFS diverges from the expected behaviour of a POSIX
|
||||||
|
filesystem.
|
||||||
|
|
||||||
|
The behaviour of the other Hadoop filesystems is not as rigorously tested.
|
||||||
|
The bundled S3 FileSystem makes Amazon's S3 Object Store ("blobstore")
|
||||||
|
accessible through the FileSystem API. The Swift FileSystem driver provides similar
|
||||||
|
functionality for the OpenStack Swift blobstore. The Azure object storage
|
||||||
|
FileSystem in branch-1-win talks to Microsoft's Azure equivalent. All of these
|
||||||
|
bind to object stores, which do have different behaviors, especially regarding
|
||||||
|
consistency guarantees, and atomicity of operations.
|
||||||
|
|
||||||
|
The "Local" FileSystem provides access to the underlying filesystem of the
|
||||||
|
platform. Its behavior is defined by the operating system and can
|
||||||
|
behave differently from HDFS. Examples of local filesystem quirks include
|
||||||
|
case-sensitivity, action when attempting to rename a file atop another file,
|
||||||
|
and whether it is possible to `seek()` past
|
||||||
|
the end of the file.
|
||||||
|
|
||||||
|
There are also filesystems implemented by third parties that assert
|
||||||
|
compatibility with Apache Hadoop. There is no formal compatibility suite, and
|
||||||
|
hence no way for anyone to declare compatibility except in the form of their
|
||||||
|
own compatibility tests.
|
||||||
|
|
||||||
|
These documents *do not* attempt to provide a normative definition of compatibility.
|
||||||
|
Passing the associated test suites *does not* guarantee correct behavior of applications.
|
||||||
|
|
||||||
|
What the test suites do define is the expected set of actions—failing these
|
||||||
|
tests will highlight potential issues.
|
||||||
|
|
||||||
|
By making each aspect of the contract tests configurable, it is possible to
|
||||||
|
declare how a filesystem diverges from parts of the standard contract.
|
||||||
|
This is information which can be conveyed to users of the filesystem.
|
||||||
|
|
||||||
|
### Naming
|
||||||
|
|
||||||
|
This document follows RFC 2119 rules regarding the use of MUST, MUST NOT, MAY,
|
||||||
|
and SHALL. MUST NOT is treated as normative.
|
||||||
|
|
||||||
|
## Implicit assumptions of the Hadoop FileSystem APIs
|
||||||
|
|
||||||
|
The original `FileSystem` class and its usages are based on an implicit set of
|
||||||
|
assumptions. Chiefly, that HDFS is
|
||||||
|
the underlying FileSystem, and that it offers a subset of the behavior of a
|
||||||
|
POSIX filesystem (or at least the implementation of the POSIX filesystem
|
||||||
|
APIs and model provided by Linux filesystems).
|
||||||
|
|
||||||
|
Irrespective of the API, it's expected that all Hadoop-compatible filesystems
|
||||||
|
present the model of a filesystem implemented in Unix:
|
||||||
|
|
||||||
|
* It's a hierarchical directory structure with files and directories.
|
||||||
|
|
||||||
|
* Files contain zero or more bytes of data.
|
||||||
|
|
||||||
|
* You cannot put files or directories under a file.
|
||||||
|
|
||||||
|
* Directories contain zero or more files.
|
||||||
|
|
||||||
|
* A directory entry has no data itself.
|
||||||
|
|
||||||
|
* You can write arbitrary binary data to a file. When the file's contents
|
||||||
|
are read, from anywhere inside or outside of the cluster, the data is returned.
|
||||||
|
|
||||||
|
* You can store many gigabytes of data in a single file.
|
||||||
|
|
||||||
|
* The root directory, `"/"`, always exists, and cannot be renamed.
|
||||||
|
|
||||||
|
* The root directory, `"/"`, is always a directory, and cannot be overwritten by a file write operation.
|
||||||
|
|
||||||
|
* Any attempt to recursively delete the root directory will delete its contents (barring
|
||||||
|
lack of permissions), but will not delete the root path itself.
|
||||||
|
|
||||||
|
* You cannot rename/move a directory under itself.
|
||||||
|
|
||||||
|
* You cannot rename/move a directory atop any existing file other than the
|
||||||
|
source file itself.
|
||||||
|
|
||||||
|
* Directory listings return all the data files in the directory (i.e.
|
||||||
|
there may be hidden checksum files, but all the data files are listed).
|
||||||
|
|
||||||
|
* The attributes of a file in a directory listing (e.g. owner, length) match
|
||||||
|
the actual attributes of a file, and are consistent with the view from an
|
||||||
|
opened file reference.
|
||||||
|
|
||||||
|
* Security: if the caller lacks the permissions for an operation, it will fail and raise an error.
|
||||||
|
|
||||||
|
### Path Names
|
||||||
|
|
||||||
|
* A Path is comprised of Path elements separated by `"/"`.
|
||||||
|
|
||||||
|
* A path element is a unicode string of 1 or more characters.
|
||||||
|
|
||||||
|
* Path element MUST NOT include the characters `":"` or `"/"`.
|
||||||
|
|
||||||
|
* Path element SHOULD NOT include characters of ASCII/UTF-8 values 0-31.
|
||||||
|
|
||||||
|
* Path element MUST NOT be `"."` or `".."`
|
||||||
|
|
||||||
|
* Note also that the Azure blob store documents say that paths SHOULD NOT use
|
||||||
|
a trailing `"."` (as their .NET URI class strips it).
|
||||||
|
|
||||||
|
* Paths are compared based on unicode code-points.
|
||||||
|
|
||||||
|
* Case-insensitive and locale-specific comparisons MUST NOT be used.
|
||||||
|
|
||||||
|
### Security Assumptions
|
||||||
|
|
||||||
|
Except in the special section on security, this document assumes the client has
|
||||||
|
full access to the FileSystem. Accordingly, the majority of items in the list
|
||||||
|
do not add the qualification "assuming the user has the rights to perform the
|
||||||
|
operation with the supplied parameters and paths".
|
||||||
|
|
||||||
|
The failure modes when a user lacks security permissions are not specified.
|
||||||
|
|
||||||
|
### Networking Assumptions
|
||||||
|
|
||||||
|
This document assumes that all network operations succeed. All statements
|
||||||
|
can be assumed to be qualified as *"assuming the operation does not fail due
|
||||||
|
to a network availability problem"*
|
||||||
|
|
||||||
|
* The final state of a FileSystem after a network failure is undefined.
|
||||||
|
|
||||||
|
* The immediate consistency state of a FileSystem after a network failure is undefined.
|
||||||
|
|
||||||
|
* If a network failure can be reported to the client, the failure MUST be an
|
||||||
|
instance of `IOException` or subclass thereof.
|
||||||
|
|
||||||
|
* The exception details SHOULD include diagnostics suitable for an experienced
|
||||||
|
Java developer _or_ operations team to begin diagnostics. For example, source
|
||||||
|
and destination hostnames and ports on a ConnectionRefused exception.
|
||||||
|
|
||||||
|
* The exception details MAY include diagnostics suitable for inexperienced
|
||||||
|
developers to begin diagnostics. For example Hadoop tries to include a
|
||||||
|
reference to [ConnectionRefused](http://wiki.apache.org/hadoop/ConnectionRefused) when a TCP
|
||||||
|
connection request is refused.
|
||||||
|
|
||||||
|
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
|
||||||
|
|
||||||
|
## Core Expectations of a Hadoop Compatible FileSystem
|
||||||
|
|
||||||
|
Here are the core expectations of a Hadoop-compatible FileSystem.
|
||||||
|
Some FileSystems do not meet all these expectations; as a result,
|
||||||
|
some programs may not work as expected.
|
||||||
|
|
||||||
|
### Atomicity
|
||||||
|
|
||||||
|
There are some operations that MUST be atomic. This is because they are
|
||||||
|
often used to implement locking/exclusive access between processes in a cluster.
|
||||||
|
|
||||||
|
1. Creating a file. If the `overwrite` parameter is false, the check and creation
|
||||||
|
MUST be atomic.
|
||||||
|
1. Deleting a file.
|
||||||
|
1. Renaming a file.
|
||||||
|
1. Renaming a directory.
|
||||||
|
1. Creating a single directory with `mkdir()`.
|
||||||
|
|
||||||
|
* Recursive directory deletion MAY be atomic. Although HDFS offers atomic
|
||||||
|
recursive directory deletion, none of the other Hadoop FileSystems
|
||||||
|
offer such a guarantee (including local FileSystems).
|
||||||
|
|
||||||
|
Most other operations come with no requirements or guarantees of atomicity.
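
A minimal sketch of why the check-and-create step matters, written against the Python-flavoured model used later in this specification rather than the real Hadoop API: if the existence check and the creation were separate steps, two clients racing to create the same "lock" file could both believe they had won. The names below are illustrative only.

    # FS.Files modelled as a dict of path -> bytes
    def create(files, path, data, overwrite):
        if not overwrite and path in files:
            raise FileExistsError(path)   # the loser of the race MUST fail here
        # in a real filesystem the check above and the update below are one atomic step
        files[path] = data

    files = {}
    create(files, ("work", "lock"), b"", overwrite=False)       # first caller succeeds
    try:
        create(files, ("work", "lock"), b"", overwrite=False)   # second caller fails
    except FileExistsError:
        print("lock already held")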
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### Consistency
|
||||||
|
|
||||||
|
The consistency model of a Hadoop FileSystem is *one-copy-update-semantics*;
|
||||||
|
that of a traditional local POSIX filesystem. Note that even NFS relaxes
|
||||||
|
some constraints about how fast changes propagate.
|
||||||
|
|
||||||
|
* *Create.* Once the `close()` operation on an output stream writing a newly
|
||||||
|
created file has completed, in-cluster operations querying the file metadata
|
||||||
|
and contents MUST immediately see the file and its data.
|
||||||
|
|
||||||
|
* *Update.* Once the `close()` operation on an output stream that has updated an
|
||||||
|
existing file has completed, in-cluster operations querying the file metadata
|
||||||
|
and contents MUST immediately see the new data.
|
||||||
|
|
||||||
|
* *Delete.* once a `delete()` operation on a path other than "/" has completed successfully,
|
||||||
|
it MUST NOT be visible or accessible. Specifically,
|
||||||
|
`listStatus()`, `open()`, `rename()` and `append()`
|
||||||
|
operations MUST fail.
|
||||||
|
|
||||||
|
* *Delete then create.* When a file is deleted then a new file of the same name created, the new file
|
||||||
|
MUST be immediately visible and its contents accessible via the FileSystem APIs.
|
||||||
|
|
||||||
|
* *Rename.* After a `rename()` has completed, operations against the new path MUST
|
||||||
|
succeed; attempts to access the data against the old path MUST fail.
|
||||||
|
|
||||||
|
* The consistency semantics inside of the cluster MUST be the same as outside of the cluster.
|
||||||
|
All clients querying a file that is not being actively manipulated MUST see the
|
||||||
|
same metadata and data irrespective of their location.
|
||||||
|
|
||||||
|
### Concurrency
|
||||||
|
|
||||||
|
There are no guarantees of isolated access to data: if one client is interacting
|
||||||
|
with a remote file and another client changes that file, the changes may or may
|
||||||
|
not be visible.
|
||||||
|
|
||||||
|
### Operations and failures
|
||||||
|
|
||||||
|
* All operations MUST eventually complete, successfully or unsuccessfully.
|
||||||
|
|
||||||
|
* The time to complete an operation is undefined and may depend on
|
||||||
|
the implementation and on the state of the system.
|
||||||
|
|
||||||
|
* Operations MAY throw a `RuntimeException` or subclass thereof.
|
||||||
|
|
||||||
|
* Operations SHOULD raise all network, remote, and high-level problems as
|
||||||
|
an `IOException` or subclass thereof, and SHOULD NOT raise a
|
||||||
|
`RuntimeException` for such problems.
|
||||||
|
|
||||||
|
* Operations SHOULD report failures by way of raised exceptions, rather
|
||||||
|
than specific return codes of an operation.
|
||||||
|
|
||||||
|
* In the text, when an exception class is named, such as `IOException`,
|
||||||
|
the raised exception MAY be an instance or subclass of the named exception.
|
||||||
|
It MUST NOT be a superclass.
|
||||||
|
|
||||||
|
* If an operation is not implemented in a class, the implementation must
|
||||||
|
throw an `UnsupportedOperationException`.
|
||||||
|
|
||||||
|
* Implementations MAY retry failed operations until they succeed. If they do this,
|
||||||
|
they SHOULD do so in such a way that the *happens-before* relationship between
|
||||||
|
any sequence of operations meets the consistency and atomicity requirements
|
||||||
|
stated. See [HDFS-4849](https://issues.apache.org/jira/browse/HDFS-4849)
|
||||||
|
for an example of this: HDFS does not implement any retry feature that
|
||||||
|
could be observable by other callers.
|
||||||
|
|
||||||
|
### Undefined capacity limits
|
||||||
|
|
||||||
|
Here are some limits to FileSystem capacity that have never been explicitly
|
||||||
|
defined.
|
||||||
|
|
||||||
|
1. The maximum number of files in a directory.
|
||||||
|
|
||||||
|
1. Max number of directories in a directory
|
||||||
|
|
||||||
|
1. Maximum total number of entries (files and directories) in a filesystem.
|
||||||
|
|
||||||
|
1. The maximum length of a filename under a directory (HDFS: 8000).
|
||||||
|
|
||||||
|
1. `MAX_PATH` - the total length of the entire directory tree referencing a
|
||||||
|
file. Blobstores tend to stop at ~1024 characters.
|
||||||
|
|
||||||
|
1. The maximum depth of a path (HDFS: 1000 directories).
|
||||||
|
|
||||||
|
1. The maximum size of a single file.
|
||||||
|
|
||||||
|
### Undefined timeouts
|
||||||
|
|
||||||
|
Timeouts for operations are not defined at all, including:
|
||||||
|
|
||||||
|
* The maximum completion time of blocking FS operations.
|
||||||
|
MAPREDUCE-972 documents how `distcp` broke on slow s3 renames.
|
||||||
|
|
||||||
|
* The timeout for idle read streams before they are closed.
|
||||||
|
|
||||||
|
* The timeout for idle write streams before they are closed.
|
||||||
|
|
||||||
|
The blocking-operation timeout is in fact variable in HDFS, as sites and
|
||||||
|
clients may tune the retry parameters so as to convert filesystem failures and
|
||||||
|
failovers into pauses in operation. Instead there is a general assumption that
|
||||||
|
FS operations are "fast but not as fast as local FS operations", and that the latency of data
|
||||||
|
reads and writes scales with the volume of data. This
|
||||||
|
assumption by client applications reveals a more fundamental one: that the filesystem is "close"
|
||||||
|
as far as network latency and bandwidth are concerned.
|
||||||
|
|
||||||
|
There are also some implicit assumptions about the overhead of some operations.
|
||||||
|
|
||||||
|
1. `seek()` operations are fast and incur little or no network delays. [This
|
||||||
|
does not hold on blob stores]
|
||||||
|
|
||||||
|
1. Directory list operations are fast for directories with few entries.
|
||||||
|
|
||||||
|
1. Directory list operations on directories with many entries may
|
||||||
|
incur a cost that is `O(entries)`. Hadoop 2 added iterative listing to
|
||||||
|
handle the challenge of listing directories with millions of entries without
|
||||||
|
buffering -at the cost of consistency.
|
||||||
|
|
||||||
|
1. A `close()` of an `OutputStream` is fast, irrespective of whether or not
|
||||||
|
the file operation has succeeded.
|
||||||
|
|
||||||
|
1. The time to delete a directory is independent of the number of
|
||||||
|
child entries.
|
||||||
|
|
||||||
|
### Object Stores vs. Filesystems
|
||||||
|
|
||||||
|
This specification refers to *Object Stores* in places, often using the
|
||||||
|
term *Blobstore*. Hadoop does provide FileSystem client classes for some of these
|
||||||
|
even though they violate many of the requirements. This is why, although
|
||||||
|
Hadoop can read and write data in an object store, the two which Hadoop ships
|
||||||
|
with direct support for —Amazon S3 and OpenStack Swift— cannot
|
||||||
|
be used as direct replacement for HDFS.
|
||||||
|
|
||||||
|
*What is an Object Store?*
|
||||||
|
|
||||||
|
An object store is a data storage service, usually accessed over HTTP/HTTPS.
|
||||||
|
A `PUT` request uploads an object/"Blob"; a `GET` request retrieves it; ranged
|
||||||
|
`GET` operations permit portions of a blob to be retrieved.
|
||||||
|
To delete the object, the HTTP `DELETE` operation is invoked.
|
||||||
|
|
||||||
|
Objects are stored by name: a string, possibly with "/" symbols in them. There
|
||||||
|
is no notion of a directory; arbitrary names can be assigned to objects —
|
||||||
|
within the limitations of the naming scheme imposed by the service's provider.
|
||||||
|
|
||||||
|
The object stores invariably provide an operation to retrieve objects with
|
||||||
|
a given prefix; a `GET` operation on the root of the service with the
|
||||||
|
appropriate query parameters.
|
||||||
|
|
||||||
|
Object stores usually prioritize availability —there is no single point
|
||||||
|
of failure equivalent to the HDFS NameNode(s). They also strive for simple
|
||||||
|
non-POSIX APIs: the HTTP verbs are the operations allowed.
|
||||||
|
|
||||||
|
Hadoop FileSystem clients for object stores attempt to make the
|
||||||
|
stores pretend that they are a FileSystem, a FileSystem with the same
|
||||||
|
features and operations as HDFS. This is —ultimately—a pretence:
|
||||||
|
they have different characteristics and occasionally the illusion fails.
|
||||||
|
|
||||||
|
1. **Consistency**. Object stores are generally *Eventually Consistent*: it
|
||||||
|
can take time for changes to objects —creation, deletion and updates—
|
||||||
|
to become visible to all callers. Indeed, there is no guarantee a change is
|
||||||
|
immediately visible to the client which just made the change. As an example,
|
||||||
|
an object `test/data1.csv` may be overwritten with a new set of data, but when
|
||||||
|
a `GET test/data1.csv` call is made shortly after the update, the original data
|
||||||
|
may be returned. Hadoop assumes that filesystems are consistent; that creation, updates
|
||||||
|
and deletions are immediately visible, and that the results of listing a directory
|
||||||
|
are current with respect to the files within that directory.
|
||||||
|
|
||||||
|
1. **Atomicity**. Hadoop assumes that directory `rename()` operations are atomic,
|
||||||
|
as are `delete()` operations. Object store FileSystem clients implement these
|
||||||
|
as operations on the individual objects whose names match the directory prefix.
|
||||||
|
As a result, the changes take place a file at a time, and are not atomic. If
|
||||||
|
an operation fails part way through the process, the state of the object store
|
||||||
|
reflects the partially completed operation. Note also that client code
|
||||||
|
assumes that these operations are `O(1)` —in an object store they are
|
||||||
|
more likely to be `O(child-entries)`.
|
||||||
|
|
||||||
|
1. **Durability**. Hadoop assumes that `OutputStream` implementations write data
|
||||||
|
to their (persistent) storage on a `flush()` operation. Object store implementations
|
||||||
|
save all their written data to a local file, a file that is then only `PUT`
|
||||||
|
to the object store in the final `close()` operation. As a result, there is
|
||||||
|
never any partial data from incomplete or failed operations. Furthermore,
|
||||||
|
as the write process only starts in `close()` operation, that operation may take
|
||||||
|
a time proportional to the quantity of data to upload, and inversely proportional
|
||||||
|
to the network bandwidth. It may also fail —a failure that is better
|
||||||
|
escalated than ignored.
|
||||||
|
|
||||||
|
Object stores with these characteristics cannot be used as a direct replacement
|
||||||
|
for HDFS. In terms of this specification, their implementations of the
|
||||||
|
specified operations do not match those required. They are considered supported
|
||||||
|
by the Hadoop development community, but not to the same extent as HDFS.
|
|
@ -0,0 +1,230 @@
|
||||||
|
<!---
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License. See accompanying LICENSE file.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# A Model of a Hadoop Filesystem
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#### Paths and Path Elements
|
||||||
|
|
||||||
|
A Path is a list of Path elements which represents a path to a file, directory or symbolic link.
|
||||||
|
|
||||||
|
Path elements are non-empty strings. The exact set of valid strings MAY
|
||||||
|
be specific to a particular FileSystem implementation.
|
||||||
|
|
||||||
|
Path elements MUST NOT be in `{"", ".", "..", "/"}`.
|
||||||
|
|
||||||
|
Path elements MUST NOT contain the characters `{'/', ':'}`.
|
||||||
|
|
||||||
|
Filesystems MAY have other strings that are not permitted in a path element.
|
||||||
|
|
||||||
|
When validating path elements, the exception `InvalidPathException` SHOULD
|
||||||
|
be raised when a path is invalid [HDFS]
|
||||||
|
|
||||||
|
Predicate: `valid-path-element:List[String];`
|
||||||
|
|
||||||
|
A path element `pe` is invalid if any character in it is in the set of forbidden characters,
|
||||||
|
or the element as a whole is invalid
|
||||||
|
|
||||||
|
forall e in pe: not (e in {'/', ':'})
|
||||||
|
not pe in {"", ".", "..", "/"}
|
||||||
|
|
||||||
|
|
||||||
|
Predicate: `valid-path:List<PathElement>`
|
||||||
|
|
||||||
|
A Path `p` is *valid* if all path elements in it are valid
|
||||||
|
|
||||||
|
def valid-path(p): forall pe in p: valid-path-element(pe)
|
||||||
|
|
||||||
|
|
||||||
|
The set of all possible paths is *Paths*; this is the infinite set of all lists of valid path elements.
|
||||||
|
|
||||||
|
The path represented by empty list, `[]` is the *root path*, and is denoted by the string `"/"`.
|
||||||
|
|
||||||
|
The partial function `parent(path:Path):Path` provides the parent path; it can be defined using
|
||||||
|
list slicing.
|
||||||
|
|
||||||
|
def parent(pe) : pe[0:-1]
|
||||||
|
|
||||||
|
Preconditions:
|
||||||
|
|
||||||
|
path != []
|
||||||
|
|
||||||
|
|
||||||
|
#### `filename:Path->PathElement`
|
||||||
|
|
||||||
|
The last Path Element in a Path is called the filename.
|
||||||
|
|
||||||
|
def filename(p) : p[-1]
|
||||||
|
|
||||||
|
Preconditions:
|
||||||
|
|
||||||
|
p != []
|
||||||
|
|
||||||
|
#### `childElements:(Path p, Path q):Path`
|
||||||
|
|
||||||
|
|
||||||
|
The partial function `childElements:(Path p, Path q):Path`
|
||||||
|
is the list of path elements in `p` that follow the path `q`.
|
||||||
|
|
||||||
|
def childElements(p, q): p[len(q):]
|
||||||
|
|
||||||
|
Preconditions:
|
||||||
|
|
||||||
|
|
||||||
|
# The path 'q' must be at the head of the path 'p'
|
||||||
|
q == p[:len(q)]
|
||||||
|
|
||||||
|
|
||||||
|
#### ancestors(Path): List[Path]
|
||||||
|
|
||||||
|
The list of all paths that are either the direct parent of a path p, or a parent of an
|
||||||
|
ancestor of p.
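
The path manipulation functions above can be rendered as ordinary Python (a non-normative aid; paths are plain lists of path elements):

    def valid_path_element(pe):
        return pe not in ("", ".", "..", "/") and all(c not in "/:" for c in pe)

    def valid_path(p):
        return all(valid_path_element(pe) for pe in p)

    def parent(p):
        assert p != []                  # precondition: the root has no parent
        return p[:-1]

    def filename(p):
        assert p != []
        return p[-1]

    def child_elements(p, q):
        assert q == p[:len(q)]          # precondition: q is a prefix of p
        return p[len(q):]

    def ancestors(p):
        return [p[:i] for i in range(len(p))]   # every proper prefix, root first

    assert parent(["a", "b", "c"]) == ["a", "b"]
    assert filename(["a", "b", "c"]) == "c"
    assert child_elements(["a", "b", "c"], ["a"]) == ["b", "c"]
    assert ancestors(["a", "b"]) == [[], ["a"]]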
|
||||||
|
|
||||||
|
#### Notes
|
||||||
|
|
||||||
|
This definition handles absolute paths but not relative ones; it needs to be reworked so the root element is explicit, presumably
|
||||||
|
by declaring that the root (and only the root) path element may be ['/'].
|
||||||
|
|
||||||
|
Relative paths can then be distinguished from absolute paths as the input to any function and resolved when the second entry in a two-argument function
|
||||||
|
such as `rename`.
|
||||||
|
|
||||||
|
### Defining the Filesystem
|
||||||
|
|
||||||
|
|
||||||
|
A filesystem `FS` contains a set of directories, a dictionary mapping file paths to their data, and a set of symbolic links
|
||||||
|
|
||||||
|
(Directories:set[Path], Files:[Path:List[byte]], Symlinks:set[Path])
|
||||||
|
|
||||||
|
|
||||||
|
Accessor functions return the specific element of a filesystem
|
||||||
|
|
||||||
|
def directories(FS) = FS.Directories
|
||||||
|
def file(FS) = FS.Files
|
||||||
|
def symlinks(FS) = FS.Symlinks
|
||||||
|
def filenames(FS) = keys(FS.Files)
|
||||||
|
|
||||||
|
The entire set of paths in a filesystem is a finite subset of all possible Paths; there are functions to resolve a path to its data, to a directory predicate, or to a symbolic link:
|
||||||
|
|
||||||
|
def paths(FS) = FS.Directories + filenames(FS) + FS.Symlinks
|
||||||
|
|
||||||
|
A path is deemed to exist if it is in this aggregate set:
|
||||||
|
|
||||||
|
def exists(FS, p) = p in paths(FS)
|
||||||
|
|
||||||
|
The root path, "/", is a directory represented by the path ["/"], which must always exist in a filesystem.
|
||||||
|
|
||||||
|
def isRoot(p) = p == ["/"]
|
||||||
|
|
||||||
|
forall FS in FileSystems : ["/"] in FS.Directories
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#### Directory references
|
||||||
|
|
||||||
|
A path MAY refer to a directory in a FileSystem:
|
||||||
|
|
||||||
|
isDir(FS, p): p in FS.Directories
|
||||||
|
|
||||||
|
Directories may have children, that is, there may exist other paths
|
||||||
|
in the FileSystem whose path begins with a directory. Only directories
|
||||||
|
may have children. This can be expressed
|
||||||
|
by saying that every path's parent must be a directory.
|
||||||
|
|
||||||
|
Either a path has no parent, in which case it is the root directory,
|
||||||
|
or it MUST have a parent that is a directory:
|
||||||
|
|
||||||
|
forall p in paths(FS) : isRoot(p) or isDir(FS, parent(p))
|
||||||
|
|
||||||
|
Because the parent directories of all directories must themselves satisfy
|
||||||
|
this criterion, it is implicit that only leaf nodes may be files or symbolic links:
|
||||||
|
|
||||||
|
Furthermore, because every filesystem contains the root path, every filesystem
|
||||||
|
must contain at least one directory.
|
||||||
|
|
||||||
|
A directory may have children:
|
||||||
|
|
||||||
|
def children(FS, p) = {q for q in paths(FS) where parent(q) == p}
|
||||||
|
|
||||||
|
There are no duplicate names in the child paths, because all paths are
|
||||||
|
taken from the set of lists of path elements. There can be no duplicate entries
|
||||||
|
in a set, hence no children with duplicate names.
|
||||||
|
|
||||||
|
A path *D* is a descendant of a path *P* if it is the direct child of the
|
||||||
|
path *P* or an ancestor is a direct child of path *P*:
|
||||||
|
|
||||||
|
def isDescendant(P, D) = parent(D) == P or isDescendant(P, parent(D))
|
||||||
|
|
||||||
|
The descendants of a directory P are all paths in the filesystem whose
|
||||||
|
path begins with the path P -that is, their parent is P or an ancestor is P:
|
||||||
|
|
||||||
|
def descendants(FS, D) = {p for p in paths(FS) where isDescendant(D, p)}
|
||||||
|
|
||||||
|
|
||||||
|
#### File references
|
||||||
|
|
||||||
|
A path MAY refer to a file; that is, it has data in the filesystem and its path is a key of the data dictionary
|
||||||
|
|
||||||
|
def isFile(FS, p) = p in FS.Files
|
||||||
|
|
||||||
|
|
||||||
|
#### Symbolic references
|
||||||
|
|
||||||
|
A path MAY refer to a symbolic link:
|
||||||
|
|
||||||
|
def isSymlink(FS, p) = p in symlinks(FS)
|
||||||
|
|
||||||
|
|
||||||
|
#### File Length
|
||||||
|
|
||||||
|
The length of a path p in a filesystem FS is the length of the data stored, or 0 if it is a directory:
|
||||||
|
|
||||||
|
def length(FS, p) = if isFile(FS, p) : return length(data(FS, p)) else return 0
|
||||||
|
|
||||||
|
### User home
|
||||||
|
|
||||||
|
The home directory of a user is an implicit part of a filesystem, and is derived from the userid of the
|
||||||
|
process working with the filesystem:
|
||||||
|
|
||||||
|
def getHomeDirectory(FS) : Path
|
||||||
|
|
||||||
|
The function `getHomeDirectory` returns the home directory for the Filesystem and the current user account.
|
||||||
|
For some FileSystems, the path is `["/","users", System.getProperty("user-name")]`. However,
|
||||||
|
for HDFS the path is `["/", "user", username]`, where the username is the identity with which the client authenticated to the cluster, which need not match the local `user-name` property.
|
||||||
|
|
||||||
|
#### Exclusivity
|
||||||
|
|
||||||
|
A path cannot refer to more than one of a file, a directory or a symbolic link
|
||||||
|
|
||||||
|
|
||||||
|
FS.Directories ^ keys(data(FS)) == {}
|
||||||
|
FS.Directories ^ symlinks(FS) == {}
|
||||||
|
keys(data(FS)) ^ symlinks(FS) == {}
|
||||||
|
|
||||||
|
|
||||||
|
This implies that only files may have data.
|
||||||
|
|
||||||
|
This condition is invariant and is an implicit postcondition of all
|
||||||
|
operations that manipulate the state of a FileSystem `FS`.
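
Pulling the definitions in this section together, here is a small executable sketch of the model (illustrative Python only; it represents the root as the empty tuple and is not part of the normative specification):

    from collections import namedtuple

    # Directories: set of paths; Files: dict of path -> bytes; Symlinks: set of paths
    FS = namedtuple("FS", ["Directories", "Files", "Symlinks"])

    def paths(fs):         return fs.Directories | set(fs.Files) | fs.Symlinks
    def exists(fs, p):     return p in paths(fs)
    def is_dir(fs, p):     return p in fs.Directories
    def is_file(fs, p):    return p in fs.Files
    def is_symlink(fs, p): return p in fs.Symlinks
    def length(fs, p):     return len(fs.Files[p]) if is_file(fs, p) else 0
    def children(fs, p):   return {q for q in paths(fs) if q != () and q[:-1] == p}

    def invariant(fs):
        # the root exists and is a directory; a path is at most one of directory,
        # file or symlink; every non-root path has a parent that is a directory
        return (() in fs.Directories
                and not (fs.Directories & set(fs.Files))
                and not (fs.Directories & fs.Symlinks)
                and not (set(fs.Files) & fs.Symlinks)
                and all(q == () or is_dir(fs, q[:-1]) for q in paths(fs)))

    fs = FS({(), ("a",)}, {("a", "f"): b"data"}, set())
    assert invariant(fs) and is_file(fs, ("a", "f")) and length(fs, ("a", "f")) == 4
    assert children(fs, ("a",)) == {("a", "f")}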
|
||||||
|
|
||||||
|
### Notes
|
||||||
|
|
||||||
|
Not covered: hard links in a FileSystem. If a FileSystem supports multiple
|
||||||
|
references in *paths(FS)* to point to the same data, the outcome of operations
|
||||||
|
is undefined.
|
||||||
|
|
||||||
|
This model of a FileSystem is sufficient to describe all the FileSystem
|
||||||
|
queries and manipulations excluding metadata and permission operations.
|
||||||
|
The Hadoop `FileSystem` and `FileContext` interfaces can be specified
|
||||||
|
in terms of operations that query or change the state of a FileSystem.
|
|
@ -0,0 +1,191 @@
|
||||||
|
<!---
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License. See accompanying LICENSE file.
|
||||||
|
-->
|
||||||
|
|
||||||
|
|
||||||
|
# Notation
|
||||||
|
|
||||||
|
A formal notation such as [The Z Notation](http://www.open-std.org/jtc1/sc22/open/n3187.pdf)
|
||||||
|
would be the strictest way to define Hadoop FileSystem behavior, and could even
|
||||||
|
be used to prove some axioms.
|
||||||
|
|
||||||
|
However, it has a number of practical flaws:
|
||||||
|
|
||||||
|
1. Such notations are not as widely used as they should be, so the broader software
|
||||||
|
development community is not going to have practical experience of it.
|
||||||
|
|
||||||
|
1. It's very hard to work with without dropping into tools such as LaTeX *and* add-on libraries.
|
||||||
|
|
||||||
|
1. Such notations are difficult to understand, even for experts.
|
||||||
|
|
||||||
|
Given that the target audience of this specification is FileSystem developers,
|
||||||
|
formal notations are not appropriate. Instead, broad comprehensibility, ease of maintenance, and
|
||||||
|
ease of deriving tests take priority over mathematically-pure formal notation.
|
||||||
|
|
||||||
|
### Mathematics Symbols in this document
|
||||||
|
|
||||||
|
This document does use a subset of [the notation in the Z syntax](http://staff.washington.edu/jon/z/glossary.html),
|
||||||
|
but in an ASCII form and the use of Python list notation for manipulating lists and sets.
|
||||||
|
|
||||||
|
* `iff` : `iff` If and only if
|
||||||
|
* `⇒` : `implies`
|
||||||
|
* `→` : `-->` total function
|
||||||
|
* `↛` : `->` partial function
|
||||||
|
|
||||||
|
|
||||||
|
* `∩` : `^`: Set Intersection
|
||||||
|
* `∪` : `+`: Set Union
|
||||||
|
* `\` : `-`: Set Difference
|
||||||
|
|
||||||
|
* `∃` : `exists` Exists predicate
|
||||||
|
* `∀` : `forall`: For all predicate
|
||||||
|
* `=` : `==` Equals operator
|
||||||
|
* `≠` : `!=` operator. In Java `z ≠ y` is written as `!( z.equals(y))` for all non-simple datatypes
|
||||||
|
* `≡` : `equivalent-to` equivalence operator. This is stricter than equals.
|
||||||
|
* `∅` : `{}` Empty Set. `∅ ≡ {}`
|
||||||
|
* `≈` : `approximately-equal-to` operator
|
||||||
|
* `¬` : `not` Not operator. In Java, `!`
|
||||||
|
* `∄` : `does-not-exist`: Does not exist predicate. Equivalent to `not exists`
|
||||||
|
* `∧` : `and` : logical and operator. In Java, `&&`
|
||||||
|
* `∨` : `or` : logical or operator. In Java, `||`
|
||||||
|
* `∈` : `in` : element of
|
||||||
|
* `∉` : `not in` : not an element of
|
||||||
|
* `⊆` : `subset-or-equal-to` the subset or equality condition
|
||||||
|
* `⊂` : `subset-of` the proper subset condition
|
||||||
|
* `| p |` : `len(p)` the size of a variable
|
||||||
|
|
||||||
|
* `:=` : `=` : assignment
|
||||||
|
|
||||||
|
* `` : `#` : Python-style comments
|
||||||
|
|
||||||
|
* `happens-before` : `happens-before` : Lamport's ordering relationship as defined in
|
||||||
|
[Time, Clocks and the Ordering of Events in a Distributed System](http://research.microsoft.com/en-us/um/people/lamport/pubs/time-clocks.pdf)
|
||||||
|
|
||||||
|
#### Sets, Lists, Maps, and Strings
|
||||||
|
|
||||||
|
The [python data structures](http://docs.python.org/2/tutorial/datastructures.html)
|
||||||
|
are used as the basis for this syntax as it is both plain ASCII and well-known.
|
||||||
|
|
||||||
|
##### Lists
|
||||||
|
|
||||||
|
* A list *L* is an ordered sequence of elements `[e1, e2, ... en]`
|
||||||
|
* The size of a list `len(L)` is the number of elements in a list.
|
||||||
|
* Items can be addressed by a 0-based index `e1 == L[0]`
|
||||||
|
* Python slicing operators can address subsets of a list `L[0:2] == [e1,e2]`, `L[-1] == en`
|
||||||
|
* Lists can be concatenated `L' = L + [ e3 ]`
|
||||||
|
* Lists can have entries removed `L' = L - [ e2, e1 ]`. This is different from Python's
|
||||||
|
`del` operation, which operates on the list in place.
|
||||||
|
* The membership predicate `in` returns true iff an element is a member of a List: `e2 in L`
|
||||||
|
* List comprehensions can create new lists: `L' = [ x for x in l where x < 5]`
|
||||||
|
* for a list `L`, `len(L)` returns the number of elements.
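
The same conventions written as runnable Python (non-normative; note that the `-` removal above is written here as a comprehension, since Python lists do not define `-`):

    L = ["a", "b", "c"]
    assert len(L) == 3 and L[0] == "a" and L[-1] == "c"
    assert L[0:2] == ["a", "b"]                       # slicing addresses a sub-list
    assert L + ["d"] == ["a", "b", "c", "d"]          # concatenation builds a new list
    assert [x for x in L if x != "b"] == ["a", "c"]   # removal, without mutating L
    assert "b" in L                                   # membership predicate
    assert [x for x in [1, 7, 3, 9] if x < 5] == [1, 3]   # list comprehension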
|
||||||
|
|
||||||
|
|
||||||
|
##### Sets
|
||||||
|
|
||||||
|
Sets are an extension of the List notation, adding the restrictions that there can
|
||||||
|
be no duplicate entries in the set, and there is no defined order.
|
||||||
|
|
||||||
|
* A set is an unordered collection of items surrounded by `{` and `}` braces.
|
||||||
|
* When declaring one, the python constructor `{}` is used. This is different from Python, which uses the function `set([list])`. Here the assumption
|
||||||
|
is that the difference between a set and a dictionary can be determined from the contents.
|
||||||
|
* The empty set `{}` has no elements.
|
||||||
|
* All the usual set concepts apply.
|
||||||
|
* The membership predicate is `in`.
|
||||||
|
* Set comprehension uses the Python list comprehension.
|
||||||
|
`S' = {s for s in S where len(s)==2}`
|
||||||
|
* for a set *s*, `len(s)` returns the number of elements.
|
||||||
|
* The `-` operator returns a new set excluding all items listed in the righthand set of the operator.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
##### Maps
|
||||||
|
|
||||||
|
Maps resemble Python dictionaries: `{"key": value, "key2": value2}`
|
||||||
|
|
||||||
|
* `keys(Map)` represents the set of keys in a map.
|
||||||
|
* `k in Map` holds iff `k in keys(Map)`
|
||||||
|
* The empty map is written `{:}`
|
||||||
|
* The `-` operator returns a new map which excludes the entry with the key specified.
|
||||||
|
* `len(Map)` returns the number of entries in the map.
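
Sets and maps follow the same pattern; again a non-normative Python rendering, with the map `-` operator written as a comprehension:

    S = {"ab", "cde", "f"}
    assert len(S) == 3 and "ab" in S
    assert {s for s in S if len(s) == 2} == {"ab"}        # set comprehension
    assert S - {"f"} == {"ab", "cde"}                     # excluding listed items

    M = {"key": 1, "key2": 2}
    assert set(M.keys()) == {"key", "key2"}               # keys(Map)
    assert "key" in M and len(M) == 2
    assert {k: v for k, v in M.items() if k != "key2"} == {"key": 1}   # M - key2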
|
||||||
|
|
||||||
|
##### Strings
|
||||||
|
|
||||||
|
Strings are lists of characters represented in double quotes. e.g. `"abc"`
|
||||||
|
|
||||||
|
"abc" == ['a','b','c']
|
||||||
|
|
||||||
|
#### State Immutability
|
||||||
|
|
||||||
|
All system state declarations are immutable.
|
||||||
|
|
||||||
|
The suffix "'" (single quote) is used as the convention to indicate the state of the system after a operation:
|
||||||
|
|
||||||
|
L' = L + ['d','e']
|
||||||
|
|
||||||
|
|
||||||
|
#### Function Specifications
|
||||||
|
|
||||||
|
A function is defined as a set of preconditions and a set of postconditions,
|
||||||
|
where the postconditions define the new state of the system and the return value from the function.
|
||||||
|
|
||||||
|
|
||||||
|
### Exceptions
|
||||||
|
|
||||||
|
In classic specification languages, the preconditions define the predicates that MUST be
|
||||||
|
satisfied else some failure condition is raised.
|
||||||
|
|
||||||
|
For Hadoop, we need to be able to specify what failure condition results if a specification is not
|
||||||
|
met (usually what exception is to be raised).
|
||||||
|
|
||||||
|
The notation `raise <exception-name>` is used to indicate that an exception is to be raised.
|
||||||
|
|
||||||
|
It can be used in the if-then-else sequence to define an action if a precondition is not met.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
if not exists(FS, Path) : raise IOException
|
||||||
|
|
||||||
|
If implementations may raise any one of a set of exceptions, this is denoted by
|
||||||
|
providing a set of exceptions:
|
||||||
|
|
||||||
|
if not exists(FS, Path) : raise {FileNotFoundException, IOException}
|
||||||
|
|
||||||
|
If a set of exceptions is provided, the earlier elements
|
||||||
|
of the set are preferred to the later entries, on the basis that they aid diagnosis of problems.
|
||||||
|
|
||||||
|
We also need to distinguish predicates that MUST be satisfied, along with those that SHOULD be met.
|
||||||
|
For this reason a function specification MAY include a section in the preconditions marked 'Should:'
|
||||||
|
All predicates declared in this section SHOULD be met, and if there is an entry in that section
|
||||||
|
which specifies a stricter outcome, it SHOULD be preferred. Here is an example of a should-precondition:
|
||||||
|
|
||||||
|
Should:
|
||||||
|
|
||||||
|
if not exists(FS, Path) : raise FileNotFoundException
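
Read together, a MUST precondition and its stricter SHOULD variant behave like the following sketch (illustrative Python standing in for the Java exception hierarchy; the class names are placeholders, not Hadoop classes):

    class IOError_(Exception): pass          # stands in for java.io.IOException
    class FileNotFound(IOError_): pass       # stands in for FileNotFoundException

    def open_file(files, path, strict=True):
        if path not in files:
            if strict:
                raise FileNotFound(path)     # the SHOULD outcome: most specific class
            raise IOError_(path)             # still permitted: any IOException
        return files[path]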
|
||||||
|
|
||||||
|
|
||||||
|
### Conditions
|
||||||
|
|
||||||
|
There are further conditions used in precondition and postcondition declarations.
|
||||||
|
|
||||||
|
|
||||||
|
#### `supported(instance, method)`
|
||||||
|
|
||||||
|
|
||||||
|
This condition declares that a subclass implements the named method
|
||||||
|
-some subclasses of the various FileSystem classes do not, and instead
|
||||||
|
raise `UnsupportedOperation`
|
||||||
|
|
||||||
|
As an example, one precondition of `FSDataInputStream.seek`
|
||||||
|
is that the implementation must support `Seekable.seek` :
|
||||||
|
|
||||||
|
supported(FDIS, Seekable.seek) else raise UnsupportedOperation
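
As a rough, non-normative analogue, the condition can be read as a reflective capability check (plain Python, purely illustrative):

    def supported(instance, method_name):
        # does the object provide a callable implementation of the operation?
        return callable(getattr(instance, method_name, None))

    class NotSeekable: pass

    assert not supported(NotSeekable(), "seek")   # calling seek would have to raise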
|
|
@ -0,0 +1,324 @@
|
||||||
|
<!---
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License. See accompanying LICENSE file.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Testing the Filesystem Contract
|
||||||
|
|
||||||
|
## Running the tests
|
||||||
|
|
||||||
|
A normal Hadoop test run will test those FileSystems that can be tested locally
|
||||||
|
via the local filesystem. This typically means `file://` and its underlying `LocalFileSystem`, and
|
||||||
|
`hdfs://` via the HDFS MiniCluster.
|
||||||
|
|
||||||
|
Other filesystems are skipped unless there is a specific configuration to the
|
||||||
|
remote server providing the filesystem.
|
||||||
|
|
||||||
|
|
||||||
|
These filesystem bindings must be defined in an XML configuration file, usually
|
||||||
|
`hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml`.
|
||||||
|
This file is excluded from version control and should not be checked in.
|
||||||
|
|
||||||
|
### s3://
|
||||||
|
|
||||||
|
In `contract-test-options.xml`, the filesystem name must be defined in the property `fs.contract.test.fs.s3`. The standard configuration options to define the S3 authentication details must also be provided.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
<configuration>
|
||||||
|
<property>
|
||||||
|
<name>fs.contract.test.fs.s3</name>
|
||||||
|
<value>s3://tests3hdfs/</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.s3.awsAccessKeyId</name>
|
||||||
|
<value>DONOTPCOMMITTHISKEYTOSCM</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.s3.awsSecretAccessKey</name>
|
||||||
|
<value>DONOTEVERSHARETHISSECRETKEY!</value>
|
||||||
|
</property>
|
||||||
|
</configuration>
|
||||||
|
|
||||||
|
### s3n://
|
||||||
|
|
||||||
|
|
||||||
|
In `contract-test-options.xml`, the filesystem name must be defined in the property `fs.contract.test.fs.s3n`. The standard configuration options to define the S3N authentication details must also be provided.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
|
||||||
|
<configuration>
|
||||||
|
<property>
|
||||||
|
<name>fs.contract.test.fs.s3n</name>
|
||||||
|
<value>s3n://tests3contract</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.s3n.awsAccessKeyId</name>
|
||||||
|
<value>DONOTPCOMMITTHISKEYTOSCM</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.s3n.awsSecretAccessKey</name>
|
||||||
|
<value>DONOTEVERSHARETHISSECRETKEY!</value>
|
||||||
|
</property>
</configuration>
|
||||||
|
|
||||||
|
### ftp://
|
||||||
|
|
||||||
|
|
||||||
|
In `contract-test-options.xml`, the filesystem name must be defined in
|
||||||
|
the property `fs.contract.test.fs.ftp`. The specific login options to
|
||||||
|
connect to the FTP Server must then be provided.
|
||||||
|
|
||||||
|
A path to a test directory must also be provided in the option
|
||||||
|
`fs.contract.test.ftp.testdir`. This is the directory under which
|
||||||
|
operations take place.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
|
||||||
|
<configuration>
|
||||||
|
<property>
|
||||||
|
<name>fs.contract.test.fs.ftp</name>
|
||||||
|
<value>ftp://server1/</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.ftp.user.server1</name>
|
||||||
|
<value>testuser</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.contract.test.ftp.testdir</name>
|
||||||
|
<value>/home/testuser/test</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.ftp.password.server1</name>
|
||||||
|
<value>secret-login</value>
|
||||||
|
</property>
|
||||||
|
</configuration>
|
||||||
|
|
||||||
|
|
||||||
|
### swift://
|
||||||
|
|
||||||
|
The OpenStack Swift login details must be defined in the file
|
||||||
|
`/hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml`.
|
||||||
|
The standard hadoop-common `contract-test-options.xml` resource file cannot be
|
||||||
|
used, as that file does not get included in `hadoop-common-test.jar`.
|
||||||
|
|
||||||
|
|
||||||
|
In `/hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml`
|
||||||
|
the Swift bucket name must be defined in the property `fs.contract.test.fs.swift`,
|
||||||
|
along with the login details for the specific Swift service provider in which the
|
||||||
|
bucket is posted.
|
||||||
|
|
||||||
|
<configuration>
|
||||||
|
<property>
|
||||||
|
<name>fs.contract.test.fs.swift</name>
|
||||||
|
<value>swift://swiftbucket.rackspace/</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.swift.service.rackspace.auth.url</name>
|
||||||
|
<value>https://auth.api.rackspacecloud.com/v2.0/tokens</value>
|
||||||
|
<description>Rackspace US (multiregion)</description>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.swift.service.rackspace.username</name>
|
||||||
|
<value>this-is-your-username</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.swift.service.rackspace.region</name>
|
||||||
|
<value>DFW</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.swift.service.rackspace.apikey</name>
|
||||||
|
<value>ab0bceyoursecretapikeyffef</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
</configuration>
|
||||||
|
|
||||||
|
1. Often the different public cloud Swift infrastructures exhibit different behaviors
|
||||||
|
(authentication and throttling in particular). We recommend that testers create
|
||||||
|
accounts on as many of these providers as possible and test against each of them.
|
||||||
|
1. They can be slow, especially remotely. Remote links are also the most likely
|
||||||
|
to make eventual-consistency behaviors visible, which is a mixed benefit.
|
||||||
|
|
||||||
|
## Testing a new filesystem
|
||||||
|
|
||||||
|
The core of adding a new FileSystem to the contract tests is adding a
|
||||||
|
new contract class, then creating a new non-abstract test class for every test
|
||||||
|
suite that you wish to test.
|
||||||
|
|
||||||
|
1. Do not try and add these tests into Hadoop itself. They won't be added to
|
||||||
|
the source tree. The tests must live with your own filesystem source.
|
||||||
|
1. Create a package in your own test source tree (usually) under `contract`,
|
||||||
|
for the files and tests.
|
||||||
|
1. Subclass `AbstractFSContract` for your own contract implementation.
|
||||||
|
1. For every test suite you plan to support create a non-abstract subclass,
|
||||||
|
with the name starting with `Test` and the name of the filesystem.
|
||||||
|
Example: `TestHDFSRenameContract`.
|
||||||
|
1. These non-abstract classes must implement the abstract method
|
||||||
|
`createContract()`.
|
||||||
|
1. Identify and document any filesystem bindings that must be defined in a
|
||||||
|
`src/test/resources/contract-test-options.xml` file of the specific project.
|
||||||
|
1. Run the tests until they work.
|
||||||
|
|
||||||
|
|
||||||
|
As an example, here is the implementation of the test of the `create()` tests for the local filesystem.
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractCreateContractTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalCreateContract extends AbstractCreateContractTest {
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
The standard implementation technique for subclasses of `AbstractFSContract` is to be driven entirely by a Hadoop XML configuration file stored in the test resource tree. The best practice is to store it under `/contract` with the name of the FileSystem, such as `contract/localfs.xml`. Having the XML file define all FileSystem options makes the listing of FileSystem behaviors immediately visible.
|
||||||
|
|
||||||
|
The `LocalFSContract` is a special case of this, as it must adjust its case sensitivity policy based on the OS on which it is running: for both Windows and OS/X, the filesystem is case insensitive, so the `ContractOptions.IS_CASE_SENSITIVE` option must be set to false. Furthermore, the Windows filesystem does not support Unix file and directory permissions, so the relevant flag must also be set. This is done *after* loading the XML contract file from the resource tree, simply by updating the now-loaded configuration options:
|
||||||
|
|
||||||
|
getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### Handling test failures
|
||||||
|
|
||||||
|
If your new `FileSystem` test case fails one of the contract tests, what can you do?
|
||||||
|
|
||||||
|
It depends on the cause of the problem:
|
||||||
|
|
||||||
|
1. Case: your custom `FileSystem` subclass doesn't correctly implement the specification. Fix it.
|
||||||
|
1. Case: Underlying filesystem doesn't behave in a way that matches Hadoop's expectations. Ideally, fix. Or try to make your `FileSystem` subclass hide the differences, e.g. by translating exceptions.
|
||||||
|
1. Case: fundamental architectural differences between your filesystem and Hadoop. Example: different concurrency and consistency model. Recommendation: document and make clear that the filesystem is not compatible with HDFS.
|
||||||
|
1. Case: test does not match the specification. Fix: patch test, submit the patch to Hadoop.
|
||||||
|
1. Case: specification incorrect. The underlying specification is (with a few exceptions) HDFS. If the specification does not match HDFS, HDFS should normally be assumed to be the real definition of what a FileSystem should do. If there's a mismatch, please raise it on the `hdfs-dev` mailing list. Note that while FileSystem tests live in the core Hadoop codebase, it is the HDFS team who owns the FileSystem specification and the tests that accompany it.
|
||||||
|
|
||||||
|
If a test needs to be skipped because a feature is not supported, look for an existing configuration option in the `ContractOptions` class. If there is no such option, the short-term fix is to override the test method and use the `ContractTestUtils.skip()` message to log the fact that the test is skipped. Using this method prints the message to the logs, then tells the test runner that the test was skipped. This highlights the problem.
|
||||||
|
|
||||||
|
A recommended strategy is to call the superclass test, catch the exception, and verify that the exception class and part of the error string match those raised by the current implementation. It should also `fail()` if the superclass test actually succeeded -that is, if the operation did not fail in the way that the implementation currently does. This will ensure that the test path is still executed, that any other failure of the test -possibly a regression- is picked up, and that if the feature does become implemented, the change is picked up.
|
||||||
|
|
||||||
|
A long-term solution is to enhance the base test to add a new optional feature key. This will require collaboration with the developers on the `hdfs-dev` mailing list.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 'Lax vs Strict' exceptions
|
||||||
|
|
||||||
|
The contract tests include the notion of strict vs lax exceptions. *Strict* exception reporting means: reports failures using specific subclasses of `IOException`, such as `FileNotFoundException`, `EOFException` and so on. *Lax* reporting means throws `IOException`.
|
||||||
|
|
||||||
|
While FileSystems SHOULD raise stricter exceptions, there may be reasons why they cannot. Raising lax exceptions is still allowed, it merely hampers diagnostics of failures in user applications. To declare that a FileSystem does not support the stricter exceptions, set the option `fs.contract.supports-strict-exceptions` to false.
|
||||||
|
|
||||||
|
### Supporting FileSystems with login and authentication parameters
|
||||||
|
|
||||||
|
Tests against remote FileSystems will require the URL to the FileSystem to be specified;
|
||||||
|
tests against remote FileSystems that require login details require usernames/IDs and passwords.
|
||||||
|
|
||||||
|
All these details MUST be placed in the file `src/test/resources/contract-test-options.xml`, and your SCM tools configured to never commit this file to subversion, git or
|
||||||
|
equivalent. Furthermore, the build MUST be configured to never bundle this file in any `-test` artifacts generated. The Hadoop build does this, excluding `src/test/**/*.xml` from the JAR files.
|
||||||
|
|
||||||
|
The `AbstractFSContract` class automatically loads this resource file if present; specific keys for specific test cases can be added.
|
||||||
|
|
||||||
|
As an example, here are what S3N test keys look like:
|
||||||
|
|
||||||
|
<configuration>
|
||||||
|
<property>
|
||||||
|
<name>fs.contract.test.fs.s3n</name>
|
||||||
|
<value>s3n://tests3contract</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.s3n.awsAccessKeyId</name>
|
||||||
|
<value>DONOTPCOMMITTHISKEYTOSCM</value>
|
||||||
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.s3n.awsSecretAccessKey</name>
|
||||||
|
<value>DONOTEVERSHARETHISSECRETKEY!</value>
|
||||||
|
</property>
|
||||||
|
</configuration>
|
||||||
|
|
||||||
|
The `AbstractBondedFSContract` automatically skips a test suite if the FileSystem URL is not defined in the property `fs.contract.test.fs.%s`, where `%s` matches the scheme name of the FileSystem.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### Important: passing the tests does not guarantee compatibility
|
||||||
|
|
||||||
|
Passing all the FileSystem contract tests does not mean that a filesystem can be described as "compatible with HDFS". The tests try to look at the isolated functionality of each operation, and focus on the preconditions and postconditions of each action. Core areas not covered are concurrency and aspects of failure across a distributed system.
|
||||||
|
|
||||||
|
* Consistency: are all changes immediately visible?
|
||||||
|
* Atomicity: are operations which HDFS guarantees to be atomic equally so on the new filesystem.
|
||||||
|
* Idempotency: if the filesystem implements any retry policy, is it idempotent even while other clients manipulate the filesystem?
|
||||||
|
* Scalability: does it support files as large as HDFS, or as many in a single directory?
|
||||||
|
* Durability: do files actually last -and how long for?
|
||||||
|
|
||||||
|
Proof that this is true is the fact that the Amazon S3 and OpenStack Swift object stores are eventually consistent object stores with non-atomic rename and delete operations. Single threaded test cases are unlikely to see some of the concurrency issues, while consistency is very often only visible in tests that span a datacenter.
|
||||||
|
|
||||||
|
There are also some specific aspects of the use of the FileSystem API:
|
||||||
|
|
||||||
|
* Compatibility with the `hadoop fs` CLI.
|
||||||
|
* Whether the blocksize policy produces file splits that are suitable for analytics workloads (as an example, a blocksize of 1 matches the specification, but as it tells MapReduce jobs to work a byte at a time, it is unusable).
|
||||||
|
|
||||||
|
Tests that verify these behaviors are of course welcome.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Adding a new test suite
|
||||||
|
|
||||||
|
1. New tests should be split up with a test class per operation, as is done for `seek()`, `rename()`, `create()`, and so on. This is to match up the way that the FileSystem contract specification is split up by operation. It also makes it easier for FileSystem implementors to work on one test suite at a time.
|
||||||
|
2. Subclass `AbstractFSContractTestBase` with a new abstract test suite class. Again, use `Abstract` in the title.
|
||||||
|
3. Look at `org.apache.hadoop.fs.contract.ContractTestUtils` for utility classes to aid testing, with lots of filesystem-centric assertions. Use these to make assertions about the filesystem state, and to include diagnostics information such as directory listings and dumps of mismatched files when an assertion actually fails.
|
||||||
|
4. Write tests for the local, raw local and HDFS filesystems -if one of these fails the tests then there is a sign of a problem -though be aware that they do have differences.
|
||||||
|
5. Test on the object stores once the core filesystems are passing the tests.
|
||||||
|
6. Try to log failures with as much detail as you can -the people debugging the failures will appreciate it.
|
||||||
|
|
||||||
|
|
||||||
|
### Root manipulation tests
|
||||||
|
|
||||||
|
Some tests work directly against the root filesystem, attempting to do things like rename "/" and similar actions. The root directory is "special", and it's important to test this, especially on non-POSIX filesystems such as object stores. These tests are potentially very destructive to native filesystems, so use care.
|
||||||
|
|
||||||
|
1. Add the tests under `AbstractContractRootDirectoryTest` or create a new test with (a) `Root` in the title and (b) a check in the setup method to skip the test if root tests are disabled:
|
||||||
|
|
||||||
|
        skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
|
||||||
|
|
||||||
|
1. Don't provide an implementation of this test suite to run against the local FS.
|
||||||
|
|
||||||
|
### Scalability tests
|
||||||
|
|
||||||
|
Tests designed to generate scalable load -and that includes a large number of small files, as well as fewer, larger files -should be designed to be configurable, so that users of the test
|
||||||
|
suite can configure the number and size of files.
|
||||||
|
|
||||||
|
Be aware that on object stores, the directory rename operation is usually `O(files)*O(data)` while the delete operation is `O(files)`. The latter means that even directory cleanup operations may take time and can potentially time out. It is important to design tests that work against remote filesystems with possible delays in all operations.
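
One way to make a scale test configurable is to read the limits from the configuration inside the test method. The property names below are hypothetical, and the method is assumed to sit in a subclass of `AbstractFSContractTestBase` with the usual imports:

    @Test
    public void testManySmallFiles() throws Throwable {
      // hypothetical property names; a real suite should document the keys it reads
      Configuration conf = getContract().getConf();
      int fileCount = conf.getInt("scale.test.files", 10);
      int fileSize = conf.getInt("scale.test.file.size", 1024);
      byte[] data = ContractTestUtils.dataset(fileSize, 'a', 'z');
      Path base = path("testManySmallFiles");
      for (int i = 0; i < fileCount; i++) {
        ContractTestUtils.createFile(getFileSystem(),
            new Path(base, "file-" + i), false, data);
      }
      // a single recursive delete is O(files) on object stores and may be
      // slow; allow for delays when cleaning up
      assertDeleted(base, true);
    }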
|
||||||
|
|
||||||
|
## Extending the specification
|
||||||
|
|
||||||
|
The specification is incomplete. It doesn't have complete coverage of the FileSystem classes, and there may be bits of the existing specified classes that are not covered.
|
||||||
|
|
||||||
|
1. Look at the implementations of a class/interface/method to see what they do, especially HDFS and local. These are the documentation of what is done today.
|
||||||
|
2. Look at the POSIX API specification.
|
||||||
|
3. Search through the HDFS JIRAs for discussions on FileSystem topics, and try to understand what was meant to happen, as well as what does happen.
|
||||||
|
4. Use an IDE to find out how methods are used in Hadoop, HBase and other parts of the stack. Although this assumes that these are representative Hadoop applications, it will at least show how applications *expect* a FileSystem to behave.
|
||||||
|
5. Look in the java.io source to see how the bundled FileSystem classes are expected to behave -and read their javadocs carefully.
|
||||||
|
6. If something is unclear -ask on the hdfs-dev list.
|
||||||
|
7. Don't be afraid to write tests to act as experiments and clarify what actually happens. Use the HDFS behaviours as the normative guide.
|
|
@ -226,7 +226,7 @@ public class TestLocalFileSystem {
|
||||||
try {
|
try {
|
||||||
fileSys.mkdirs(bad_dir);
|
fileSys.mkdirs(bad_dir);
|
||||||
fail("Failed to detect existing file in path");
|
fail("Failed to detect existing file in path");
|
||||||
} catch (FileAlreadyExistsException e) {
|
} catch (ParentNotDirectoryException e) {
|
||||||
// Expected
|
// Expected
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,115 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.URI;
|
||||||
|
import java.net.URISyntaxException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is a filesystem contract for any class that bonds to a filesystem
|
||||||
|
* through the configuration.
|
||||||
|
*
|
||||||
|
* It looks for a definition of the test filesystem with the key
|
||||||
|
* derived from "fs.contract.test.fs.%s" -if found the value
|
||||||
|
* is converted to a URI and used to create a filesystem. If not -the
|
||||||
|
* tests are not enabled
|
||||||
|
*/
|
||||||
|
public abstract class AbstractBondedFSContract extends AbstractFSContract {
|
||||||
|
|
||||||
|
private static final Log LOG =
|
||||||
|
LogFactory.getLog(AbstractBondedFSContract.class);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pattern for the option for test filesystems from schema
|
||||||
|
*/
|
||||||
|
public static final String FSNAME_OPTION = "test.fs.%s";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructor: loads the authentication keys if found
|
||||||
|
|
||||||
|
* @param conf configuration to work with
|
||||||
|
*/
|
||||||
|
protected AbstractBondedFSContract(Configuration conf) {
|
||||||
|
super(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
private String fsName;
|
||||||
|
private URI fsURI;
|
||||||
|
private FileSystem filesystem;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void init() throws IOException {
|
||||||
|
super.init();
|
||||||
|
//this test is only enabled if the test FS is present
|
||||||
|
fsName = loadFilesystemName(getScheme());
|
||||||
|
setEnabled(!fsName.isEmpty());
|
||||||
|
if (isEnabled()) {
|
||||||
|
try {
|
||||||
|
fsURI = new URI(fsName);
|
||||||
|
filesystem = FileSystem.get(fsURI, getConf());
|
||||||
|
} catch (URISyntaxException e) {
|
||||||
|
throw new IOException("Invalid URI " + fsName);
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
throw new IOException("Invalid URI " + fsName, e);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
LOG.info("skipping tests as FS name is not defined in "
|
||||||
|
+ getFilesystemConfKey());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load the name of a test filesystem.
|
||||||
|
* @param schema schema to look up
|
||||||
|
* @return the filesystem name -or "" if none was defined
|
||||||
|
*/
|
||||||
|
public String loadFilesystemName(String schema) {
|
||||||
|
return getOption(String.format(FSNAME_OPTION, schema), "");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the conf key for a filesystem
|
||||||
|
*/
|
||||||
|
protected String getFilesystemConfKey() {
|
||||||
|
return getConfKey(String.format(FSNAME_OPTION, getScheme()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public FileSystem getTestFileSystem() throws IOException {
|
||||||
|
return filesystem;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Path getTestPath() {
|
||||||
|
Path path = new Path("/test");
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return getScheme() + " Contract against " + fsName;
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,128 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test append -if supported
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractAppendTest extends AbstractFSContractTestBase {
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(AbstractContractAppendTest.class);
|
||||||
|
|
||||||
|
private Path testPath;
|
||||||
|
private Path target;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setup() throws Exception {
|
||||||
|
super.setup();
|
||||||
|
skipIfUnsupported(SUPPORTS_APPEND);
|
||||||
|
|
||||||
|
//delete the test directory
|
||||||
|
testPath = path("test");
|
||||||
|
target = new Path(testPath, "target");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAppendToEmptyFile() throws Throwable {
|
||||||
|
touch(getFileSystem(), target);
|
||||||
|
byte[] dataset = dataset(256, 'a', 'z');
|
||||||
|
FSDataOutputStream outputStream = getFileSystem().append(target);
|
||||||
|
try {
|
||||||
|
outputStream.write(dataset);
|
||||||
|
} finally {
|
||||||
|
outputStream.close();
|
||||||
|
}
|
||||||
|
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
|
||||||
|
dataset.length);
|
||||||
|
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAppendNonexistentFile() throws Throwable {
|
||||||
|
try {
|
||||||
|
FSDataOutputStream out = getFileSystem().append(target);
|
||||||
|
//got here: trouble
|
||||||
|
out.close();
|
||||||
|
fail("expected a failure");
|
||||||
|
} catch (Exception e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAppendToExistingFile() throws Throwable {
|
||||||
|
byte[] original = dataset(8192, 'A', 'Z');
|
||||||
|
byte[] appended = dataset(8192, '0', '9');
|
||||||
|
createFile(getFileSystem(), target, false, original);
|
||||||
|
FSDataOutputStream outputStream = getFileSystem().append(target);
|
||||||
|
outputStream.write(appended);
|
||||||
|
outputStream.close();
|
||||||
|
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
|
||||||
|
original.length + appended.length);
|
||||||
|
ContractTestUtils.validateFileContent(bytes,
|
||||||
|
new byte[][] { original, appended });
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAppendMissingTarget() throws Throwable {
|
||||||
|
try {
|
||||||
|
FSDataOutputStream out = getFileSystem().append(target);
|
||||||
|
//got here: trouble
|
||||||
|
out.close();
|
||||||
|
fail("expected a failure");
|
||||||
|
} catch (Exception e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRenameFileBeingAppended() throws Throwable {
|
||||||
|
touch(getFileSystem(), target);
|
||||||
|
assertPathExists("original file does not exist", target);
|
||||||
|
byte[] dataset = dataset(256, 'a', 'z');
|
||||||
|
FSDataOutputStream outputStream = getFileSystem().append(target);
|
||||||
|
outputStream.write(dataset);
|
||||||
|
Path renamed = new Path(testPath, "renamed");
|
||||||
|
outputStream.close();
|
||||||
|
String listing = ls(testPath);
|
||||||
|
|
||||||
|
//expected: the stream goes to the file that was being renamed, not
|
||||||
|
//the original path
|
||||||
|
assertPathExists("renamed destination file does not exist", renamed);
|
||||||
|
|
||||||
|
assertPathDoesNotExist("Source file found after rename during append:\n" +
|
||||||
|
listing, target);
|
||||||
|
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), renamed,
|
||||||
|
dataset.length);
|
||||||
|
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,112 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.assertFileHasLength;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test concat -if supported
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractConcatTest extends AbstractFSContractTestBase {
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(AbstractContractConcatTest.class);
|
||||||
|
|
||||||
|
private Path testPath;
|
||||||
|
private Path srcFile;
|
||||||
|
private Path zeroByteFile;
|
||||||
|
private Path target;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setup() throws Exception {
|
||||||
|
super.setup();
|
||||||
|
skipIfUnsupported(SUPPORTS_CONCAT);
|
||||||
|
|
||||||
|
//delete the test directory
|
||||||
|
testPath = path("test");
|
||||||
|
srcFile = new Path(testPath, "small.txt");
|
||||||
|
zeroByteFile = new Path(testPath, "zero.txt");
|
||||||
|
target = new Path(testPath, "target");
|
||||||
|
|
||||||
|
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
|
||||||
|
createFile(getFileSystem(), srcFile, false, block);
|
||||||
|
touch(getFileSystem(), zeroByteFile);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConcatEmptyFiles() throws Throwable {
|
||||||
|
touch(getFileSystem(), target);
|
||||||
|
try {
|
||||||
|
getFileSystem().concat(target, new Path[0]);
|
||||||
|
fail("expected a failure");
|
||||||
|
} catch (Exception e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConcatMissingTarget() throws Throwable {
|
||||||
|
try {
|
||||||
|
getFileSystem().concat(target,
|
||||||
|
new Path[] { zeroByteFile});
|
||||||
|
fail("expected a failure");
|
||||||
|
} catch (Exception e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConcatFileOnFile() throws Throwable {
|
||||||
|
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
|
||||||
|
createFile(getFileSystem(), target, false, block);
|
||||||
|
getFileSystem().concat(target,
|
||||||
|
new Path[] {srcFile});
|
||||||
|
assertFileHasLength(getFileSystem(), target, TEST_FILE_LEN *2);
|
||||||
|
ContractTestUtils.validateFileContent(
|
||||||
|
ContractTestUtils.readDataset(getFileSystem(),
|
||||||
|
target, TEST_FILE_LEN * 2),
|
||||||
|
new byte[][]{block, block});
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testConcatOnSelf() throws Throwable {
|
||||||
|
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
|
||||||
|
createFile(getFileSystem(), target, false, block);
|
||||||
|
try {
|
||||||
|
getFileSystem().concat(target,
|
||||||
|
new Path[]{target});
|
||||||
|
} catch (Exception e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,187 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||||
|
import org.apache.hadoop.fs.FileAlreadyExistsException;
|
||||||
|
import org.apache.hadoop.fs.FileStatus;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.apache.hadoop.io.IOUtils;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.junit.internal.AssumptionViolatedException;
|
||||||
|
|
||||||
|
import java.io.FileNotFoundException;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeTextFile;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test creating files, overwrite options &c
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractCreateTest extends
|
||||||
|
AbstractFSContractTestBase {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCreateNewFile() throws Throwable {
|
||||||
|
describe("Foundational 'create a file' test");
|
||||||
|
Path path = path("testCreateNewFile");
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false);
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
|
||||||
|
describe("Verify overwriting an existing file fails");
|
||||||
|
Path path = path("testCreateFileOverExistingFileNoOverwrite");
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
|
||||||
|
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
|
||||||
|
try {
|
||||||
|
writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
|
||||||
|
fail("writing without overwrite unexpectedly succeeded");
|
||||||
|
} catch (FileAlreadyExistsException expected) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(expected);
|
||||||
|
} catch (IOException relaxed) {
|
||||||
|
handleRelaxedException("Creating a file over a file with overwrite==false",
|
||||||
|
"FileAlreadyExistsException",
|
||||||
|
relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This test catches some eventual consistency problems that blobstores exhibit,
|
||||||
|
* as we are implicitly verifying that updates are consistent. This
|
||||||
|
* is why different file lengths and datasets are used
|
||||||
|
* @throws Throwable
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testOverwriteExistingFile() throws Throwable {
|
||||||
|
describe("Overwrite an existing file and verify the new data is there");
|
||||||
|
Path path = path("testOverwriteExistingFile");
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
|
||||||
|
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
|
||||||
|
writeDataset(getFileSystem(), path, data2, data2.length, 1024, true);
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testOverwriteEmptyDirectory() throws Throwable {
|
||||||
|
describe("verify trying to create a file over an empty dir fails");
|
||||||
|
Path path = path("testOverwriteEmptyDirectory");
|
||||||
|
mkdirs(path);
|
||||||
|
assertIsDirectory(path);
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
try {
|
||||||
|
writeDataset(getFileSystem(), path, data, data.length, 1024, true);
|
||||||
|
assertIsDirectory(path);
|
||||||
|
fail("write of file over empty dir succeeded");
|
||||||
|
} catch (FileAlreadyExistsException expected) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(expected);
|
||||||
|
} catch (FileNotFoundException e) {
|
||||||
|
handleRelaxedException("overwriting a dir with a file ",
|
||||||
|
"FileAlreadyExistsException",
|
||||||
|
e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
handleRelaxedException("overwriting a dir with a file ",
|
||||||
|
"FileAlreadyExistsException",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
assertIsDirectory(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testOverwriteNonEmptyDirectory() throws Throwable {
|
||||||
|
describe("verify trying to create a file over a non-empty dir fails");
|
||||||
|
Path path = path("testOverwriteNonEmptyDirectory");
|
||||||
|
mkdirs(path);
|
||||||
|
try {
|
||||||
|
assertIsDirectory(path);
|
||||||
|
} catch (AssertionError failure) {
|
||||||
|
if (isSupported(IS_BLOBSTORE)) {
|
||||||
|
// file/directory hack surfaces here
|
||||||
|
throw new AssumptionViolatedException(failure.toString()).initCause(failure);
|
||||||
|
}
|
||||||
|
// else: rethrow
|
||||||
|
throw failure;
|
||||||
|
}
|
||||||
|
Path child = new Path(path, "child");
|
||||||
|
writeTextFile(getFileSystem(), child, "child file", true);
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
try {
|
||||||
|
writeDataset(getFileSystem(), path, data, data.length, 1024,
|
||||||
|
true);
|
||||||
|
FileStatus status = getFileSystem().getFileStatus(path);
|
||||||
|
|
||||||
|
boolean isDir = status.isDirectory();
|
||||||
|
if (!isDir && isSupported(IS_BLOBSTORE)) {
|
||||||
|
// object store: downgrade to a skip so that the failure is visible
|
||||||
|
// in test results
|
||||||
|
skip("Object store allows a file to overwrite a directory");
|
||||||
|
}
|
||||||
|
fail("write of file over dir succeeded");
|
||||||
|
} catch (FileAlreadyExistsException expected) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(expected);
|
||||||
|
} catch (FileNotFoundException e) {
|
||||||
|
handleRelaxedException("overwriting a dir with a file ",
|
||||||
|
"FileAlreadyExistsException",
|
||||||
|
e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
handleRelaxedException("overwriting a dir with a file ",
|
||||||
|
"FileAlreadyExistsException",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
assertIsDirectory(path);
|
||||||
|
assertIsFile(child);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
|
||||||
|
describe("verify that a newly created file exists as soon as open returns");
|
||||||
|
Path path = path("testCreatedFileIsImmediatelyVisible");
|
||||||
|
FSDataOutputStream out = null;
|
||||||
|
try {
|
||||||
|
out = getFileSystem().create(path,
|
||||||
|
false,
|
||||||
|
4096,
|
||||||
|
(short) 1,
|
||||||
|
1024);
|
||||||
|
if (!getFileSystem().exists(path)) {
|
||||||
|
|
||||||
|
if (isSupported(IS_BLOBSTORE)) {
|
||||||
|
// object store: downgrade to a skip so that the failure is visible
|
||||||
|
// in test results
|
||||||
|
skip("Filesystem is an object store and newly created files are not immediately visible");
|
||||||
|
}
|
||||||
|
assertPathExists("expected path to be visible before anything written",
|
||||||
|
path);
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
IOUtils.closeStream(out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,97 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test delete operations
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractDeleteTest extends
|
||||||
|
AbstractFSContractTestBase {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteEmptyDirNonRecursive() throws Throwable {
|
||||||
|
Path path = path("testDeleteEmptyDirNonRecursive");
|
||||||
|
mkdirs(path);
|
||||||
|
assertDeleted(path, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteEmptyDirRecursive() throws Throwable {
|
||||||
|
Path path = path("testDeleteEmptyDirRecursive");
|
||||||
|
mkdirs(path);
|
||||||
|
assertDeleted(path, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteNonexistentPathRecursive() throws Throwable {
|
||||||
|
Path path = path("testDeleteNonexistentPathRecursive");
|
||||||
|
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
|
||||||
|
ContractTestUtils.rejectRootOperation(path);
|
||||||
|
assertFalse("Returned true attempting to delete"
|
||||||
|
+ " a nonexistent path " + path,
|
||||||
|
getFileSystem().delete(path, false));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteNonexistentPathNonRecursive() throws Throwable {
|
||||||
|
Path path = path("testDeleteNonexistentPathNonRecursive");
|
||||||
|
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "leftover", path);
|
||||||
|
ContractTestUtils.rejectRootOperation(path);
|
||||||
|
assertFalse("Returned true attempting to recursively delete"
|
||||||
|
+ " a nonexistent path " + path,
|
||||||
|
getFileSystem().delete(path, false));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteNonEmptyDirNonRecursive() throws Throwable {
|
||||||
|
Path path = path("testDeleteNonEmptyDirNonRecursive");
|
||||||
|
mkdirs(path);
|
||||||
|
Path file = new Path(path, "childfile");
|
||||||
|
ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world",
|
||||||
|
true);
|
||||||
|
try {
|
||||||
|
ContractTestUtils.rejectRootOperation(path);
|
||||||
|
boolean deleted = getFileSystem().delete(path, false);
|
||||||
|
fail("non recursive delete should have raised an exception," +
|
||||||
|
" but completed with exit code " + deleted);
|
||||||
|
} catch (IOException expected) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(expected);
|
||||||
|
}
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), path);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteNonEmptyDirRecursive() throws Throwable {
|
||||||
|
Path path = path("testDeleteNonEmptyDirNonRecursive");
|
||||||
|
mkdirs(path);
|
||||||
|
Path file = new Path(path, "childfile");
|
||||||
|
ContractTestUtils.writeTextFile(getFileSystem(), file, "goodbye, world",
|
||||||
|
true);
|
||||||
|
assertDeleted(path, true);
|
||||||
|
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "not deleted", file);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,115 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.FileAlreadyExistsException;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.ParentNotDirectoryException;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test directory operations
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBase {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMkDirRmDir() throws Throwable {
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
|
||||||
|
Path dir = path("testMkDirRmDir");
|
||||||
|
assertPathDoesNotExist("directory already exists", dir);
|
||||||
|
fs.mkdirs(dir);
|
||||||
|
assertPathExists("mkdir failed", dir);
|
||||||
|
assertDeleted(dir, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMkDirRmRfDir() throws Throwable {
|
||||||
|
describe("create a directory then recursive delete it");
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
Path dir = path("testMkDirRmRfDir");
|
||||||
|
assertPathDoesNotExist("directory already exists", dir);
|
||||||
|
fs.mkdirs(dir);
|
||||||
|
assertPathExists("mkdir failed", dir);
|
||||||
|
assertDeleted(dir, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testNoMkdirOverFile() throws Throwable {
|
||||||
|
describe("try to mkdir over a file");
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
Path path = path("testNoMkdirOverFile");
|
||||||
|
byte[] dataset = dataset(1024, ' ', 'z');
|
||||||
|
createFile(getFileSystem(), path, false, dataset);
|
||||||
|
try {
|
||||||
|
boolean made = fs.mkdirs(path);
|
||||||
|
fail("mkdirs did not fail over a file but returned " + made
|
||||||
|
+ "; " + ls(path));
|
||||||
|
} catch (ParentNotDirectoryException e) {
|
||||||
|
//expected: the path is a file, so mkdir must fail
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (FileAlreadyExistsException e) {
|
||||||
|
//also allowed as an exception (HDFS)
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//here the FS says "no create"
|
||||||
|
handleRelaxedException("mkdirs", "FileAlreadyExistsException", e);
|
||||||
|
}
|
||||||
|
assertIsFile(path);
|
||||||
|
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), path,
|
||||||
|
dataset.length);
|
||||||
|
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
|
||||||
|
assertPathExists("mkdir failed", path);
|
||||||
|
assertDeleted(path, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMkdirOverParentFile() throws Throwable {
|
||||||
|
describe("try to mkdir where a parent is a file");
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
Path path = path("testMkdirOverParentFile");
|
||||||
|
byte[] dataset = dataset(1024, ' ', 'z');
|
||||||
|
createFile(getFileSystem(), path, false, dataset);
|
||||||
|
Path child = new Path(path,"child-to-mkdir");
|
||||||
|
try {
|
||||||
|
boolean made = fs.mkdirs(child);
|
||||||
|
fail("mkdirs did not fail over a file but returned " + made
|
||||||
|
+ "; " + ls(path));
|
||||||
|
} catch (ParentNotDirectoryException e) {
|
||||||
|
//expected: the parent is a file, not a directory
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (FileAlreadyExistsException e) {
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
handleRelaxedException("mkdirs", "ParentNotDirectoryException", e);
|
||||||
|
}
|
||||||
|
assertIsFile(path);
|
||||||
|
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), path,
|
||||||
|
dataset.length);
|
||||||
|
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
|
||||||
|
assertPathExists("mkdir failed", path);
|
||||||
|
assertDeleted(path, true);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,155 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||||
|
import org.apache.hadoop.fs.FSDataInputStream;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.apache.hadoop.io.IOUtils;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.io.FileNotFoundException;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test open operations
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractOpenTest extends AbstractFSContractTestBase {
|
||||||
|
|
||||||
|
private FSDataInputStream instream;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected Configuration createConfiguration() {
|
||||||
|
Configuration conf = super.createConfiguration();
|
||||||
|
conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
|
||||||
|
return conf;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void teardown() throws Exception {
|
||||||
|
IOUtils.closeStream(instream);
|
||||||
|
instream = null;
|
||||||
|
super.teardown();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testOpenReadZeroByteFile() throws Throwable {
|
||||||
|
describe("create & read a 0 byte file");
|
||||||
|
Path path = path("zero.txt");
|
||||||
|
touch(getFileSystem(), path);
|
||||||
|
instream = getFileSystem().open(path);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
//expect initial read to fail
|
||||||
|
int result = instream.read();
|
||||||
|
assertMinusOne("initial byte read", result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testOpenReadDir() throws Throwable {
|
||||||
|
describe("create & read a directory");
|
||||||
|
Path path = path("zero.dir");
|
||||||
|
mkdirs(path);
|
||||||
|
try {
|
||||||
|
instream = getFileSystem().open(path);
|
||||||
|
//at this point we've opened a directory
|
||||||
|
fail("A directory has been opened for reading");
|
||||||
|
} catch (FileNotFoundException e) {
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
handleRelaxedException("opening a directory for reading",
|
||||||
|
"FileNotFoundException",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testOpenReadDirWithChild() throws Throwable {
|
||||||
|
describe("create & read a directory which has a child");
|
||||||
|
Path path = path("zero.dir");
|
||||||
|
mkdirs(path);
|
||||||
|
Path path2 = new Path(path, "child");
|
||||||
|
mkdirs(path2);
|
||||||
|
|
||||||
|
try {
|
||||||
|
instream = getFileSystem().open(path);
|
||||||
|
//at this point we've opened a directory
|
||||||
|
fail("A directory has been opened for reading");
|
||||||
|
} catch (FileNotFoundException e) {
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
handleRelaxedException("opening a directory for reading",
|
||||||
|
"FileNotFoundException",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testOpenFileTwice() throws Throwable {
|
||||||
|
describe("verify that two opened file streams are independent");
|
||||||
|
Path path = path("testopenfiletwice.txt");
|
||||||
|
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
|
||||||
|
//this file now has a simple rule: offset => value
|
||||||
|
createFile(getFileSystem(), path, false, block);
|
||||||
|
//open first
|
||||||
|
FSDataInputStream instream1 = getFileSystem().open(path);
|
||||||
|
int c = instream1.read();
|
||||||
|
assertEquals(0,c);
|
||||||
|
FSDataInputStream instream2 = null;
|
||||||
|
try {
|
||||||
|
instream2 = getFileSystem().open(path);
|
||||||
|
assertEquals("first read of instream 2", 0, instream2.read());
|
||||||
|
assertEquals("second read of instream 1", 1, instream1.read());
|
||||||
|
instream1.close();
|
||||||
|
assertEquals("second read of instream 2", 1, instream2.read());
|
||||||
|
//close instream1 again
|
||||||
|
instream1.close();
|
||||||
|
} finally {
|
||||||
|
IOUtils.closeStream(instream1);
|
||||||
|
IOUtils.closeStream(instream2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSequentialRead() throws Throwable {
|
||||||
|
describe("verify that sequential read() operations return values");
|
||||||
|
Path path = path("testsequentialread.txt");
|
||||||
|
int len = 4;
|
||||||
|
int base = 0x40; // 64
|
||||||
|
byte[] block = dataset(len, base, base + len);
|
||||||
|
//this file now has a simple rule: offset => (value | 0x40)
|
||||||
|
createFile(getFileSystem(), path, false, block);
|
||||||
|
//open first
|
||||||
|
instream = getFileSystem().open(path);
|
||||||
|
assertEquals(base, instream.read());
|
||||||
|
assertEquals(base + 1, instream.read());
|
||||||
|
assertEquals(base + 2, instream.read());
|
||||||
|
assertEquals(base + 3, instream.read());
|
||||||
|
// and now, failures
|
||||||
|
assertEquals(-1, instream.read());
|
||||||
|
assertEquals(-1, instream.read());
|
||||||
|
instream.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,185 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.FileAlreadyExistsException;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.io.FileNotFoundException;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test renaming files and directories
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractRenameTest extends
|
||||||
|
AbstractFSContractTestBase {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRenameNewFileSameDir() throws Throwable {
|
||||||
|
describe("rename a file into a new file in the same directory");
|
||||||
|
Path renameSrc = path("rename_src");
|
||||||
|
Path renameTarget = path("rename_dest");
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(getFileSystem(), renameSrc,
|
||||||
|
data, data.length, 1024 * 1024, false);
|
||||||
|
boolean rename = rename(renameSrc, renameTarget);
|
||||||
|
assertTrue("rename("+renameSrc+", "+ renameTarget+") returned false",
|
||||||
|
rename);
|
||||||
|
ContractTestUtils.assertListStatusFinds(getFileSystem(),
|
||||||
|
renameTarget.getParent(), renameTarget);
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRenameNonexistentFile() throws Throwable {
|
||||||
|
describe("rename a file into a new file in the same directory");
|
||||||
|
Path missing = path("testRenameNonexistentFileSrc");
|
||||||
|
Path target = path("testRenameNonexistentFileDest");
|
||||||
|
boolean renameReturnsFalseOnFailure =
|
||||||
|
isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
|
||||||
|
mkdirs(missing.getParent());
|
||||||
|
try {
|
||||||
|
boolean renamed = rename(missing, target);
|
||||||
|
//expected an exception
|
||||||
|
if (!renameReturnsFalseOnFailure) {
|
||||||
|
String destDirLS = generateAndLogErrorListing(missing, target);
|
||||||
|
fail("expected rename(" + missing + ", " + target + " ) to fail," +
|
||||||
|
" got a result of " + renamed
|
||||||
|
+ " and a destination directory of " + destDirLS);
|
||||||
|
} else {
|
||||||
|
// at least one FS only returns false here, if that is the case
|
||||||
|
// warn but continue
|
||||||
|
getLog().warn("Rename returned {} renaming a nonexistent file", renamed);
|
||||||
|
assertFalse("Renaming a missing file returned true", renamed);
|
||||||
|
}
|
||||||
|
} catch (FileNotFoundException e) {
|
||||||
|
if (renameReturnsFalseOnFailure) {
|
||||||
|
ContractTestUtils.fail(
|
||||||
|
"Renaming a missing file unexpectedly threw an exception", e);
|
||||||
|
}
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
handleRelaxedException("rename nonexistent file",
|
||||||
|
"FileNotFoundException",
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
assertPathDoesNotExist("rename nonexistent file created a destination file", target);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Rename test -handles filesystems that will overwrite the destination
|
||||||
|
* as well as those that do not (i.e. HDFS).
|
||||||
|
* @throws Throwable
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testRenameFileOverExistingFile() throws Throwable {
|
||||||
|
describe("Verify renaming a file onto an existing file matches expectations");
|
||||||
|
Path srcFile = path("source-256.txt");
|
||||||
|
byte[] srcData = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(getFileSystem(), srcFile, srcData, srcData.length, 1024, false);
|
||||||
|
Path destFile = path("dest-512.txt");
|
||||||
|
byte[] destData = dataset(512, 'A', 'Z');
|
||||||
|
writeDataset(getFileSystem(), destFile, destData, destData.length, 1024, false);
|
||||||
|
assertIsFile(destFile);
|
||||||
|
boolean renameOverwritesDest = isSupported(RENAME_OVERWRITES_DEST);
|
||||||
|
boolean renameReturnsFalseOnRenameDestExists =
|
||||||
|
!isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
|
||||||
|
boolean destUnchanged = true;
|
||||||
|
try {
|
||||||
|
boolean renamed = rename(srcFile, destFile);
|
||||||
|
|
||||||
|
if (renameOverwritesDest) {
|
||||||
|
// the filesystem supports rename(file, file2) by overwriting file2
|
||||||
|
|
||||||
|
assertTrue("Rename returned false", renamed);
|
||||||
|
destUnchanged = false;
|
||||||
|
} else {
|
||||||
|
// rename is rejected by returning 'false' or throwing an exception
|
||||||
|
if (renamed && !renameReturnsFalseOnRenameDestExists) {
|
||||||
|
//expected an exception
|
||||||
|
String destDirLS = generateAndLogErrorListing(srcFile, destFile);
|
||||||
|
getLog().error("dest dir {}", destDirLS);
|
||||||
|
fail("expected rename(" + srcFile + ", " + destFile + " ) to fail," +
|
||||||
|
" but got success and destination of " + destDirLS);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (FileAlreadyExistsException e) {
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
// verify that the destination file is as expected based on the expected
|
||||||
|
// outcome
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), destFile,
|
||||||
|
destUnchanged? destData: srcData);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRenameDirIntoExistingDir() throws Throwable {
|
||||||
|
describe("Verify renaming a dir into an existing dir puts it underneath"
|
||||||
|
+" and leaves existing files alone");
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
String sourceSubdir = "source";
|
||||||
|
Path srcDir = path(sourceSubdir);
|
||||||
|
Path srcFilePath = new Path(srcDir, "source-256.txt");
|
||||||
|
byte[] srcDataset = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(fs, srcFilePath, srcDataset, srcDataset.length, 1024, false);
|
||||||
|
Path destDir = path("dest");
|
||||||
|
|
||||||
|
Path destFilePath = new Path(destDir, "dest-512.txt");
|
||||||
|
byte[] destDataset = dataset(512, 'A', 'Z');
|
||||||
|
writeDataset(fs, destFilePath, destDataset, destDataset.length, 1024, false);
|
||||||
|
assertIsFile(destFilePath);
|
||||||
|
|
||||||
|
boolean rename = rename(srcDir, destDir);
|
||||||
|
Path renamedSrc = new Path(destDir, sourceSubdir);
|
||||||
|
assertIsFile(destFilePath);
|
||||||
|
assertIsDirectory(renamedSrc);
|
||||||
|
ContractTestUtils.verifyFileContents(fs, destFilePath, destDataset);
|
||||||
|
assertTrue("rename returned false though the contents were copied", rename);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRenameFileNonexistentDir() throws Throwable {
|
||||||
|
describe("rename a file into a new file in the same directory");
|
||||||
|
Path renameSrc = path("testRenameSrc");
|
||||||
|
Path renameTarget = path("subdir/testRenameTarget");
|
||||||
|
byte[] data = dataset(256, 'a', 'z');
|
||||||
|
writeDataset(getFileSystem(), renameSrc, data, data.length, 1024 * 1024,
|
||||||
|
false);
|
||||||
|
boolean renameCreatesDestDirs = isSupported(RENAME_CREATES_DEST_DIRS);
|
||||||
|
|
||||||
|
try {
|
||||||
|
boolean rename = rename(renameSrc, renameTarget);
|
||||||
|
if (renameCreatesDestDirs) {
|
||||||
|
assertTrue(rename);
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), renameTarget, data);
|
||||||
|
} else {
|
||||||
|
assertFalse(rename);
|
||||||
|
ContractTestUtils.verifyFileContents(getFileSystem(), renameSrc, data);
|
||||||
|
}
|
||||||
|
} catch (FileNotFoundException e) {
|
||||||
|
// allowed unless that rename flag is set
|
||||||
|
assertFalse(renameCreatesDestDirs);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,123 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This class does things to the root directory.
|
||||||
|
* Only subclass this for tests against transient filesystems where
|
||||||
|
* you don't care about the data.
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractRootDirectoryTest extends AbstractFSContractTestBase {
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setup() throws Exception {
|
||||||
|
super.setup();
|
||||||
|
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMkDirDepth1() throws Throwable {
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
Path dir = new Path("/testmkdirdepth1");
|
||||||
|
assertPathDoesNotExist("directory already exists", dir);
|
||||||
|
fs.mkdirs(dir);
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
|
||||||
|
assertPathExists("directory already exists", dir);
|
||||||
|
assertDeleted(dir, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRmEmptyRootDirNonRecursive() throws Throwable {
|
||||||
|
//extra sanity checks here to avoid support calls about complete loss of data
|
||||||
|
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
|
||||||
|
Path root = new Path("/");
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
|
||||||
|
boolean deleted = getFileSystem().delete(root, true);
|
||||||
|
LOG.info("rm / of empty dir result is {}", deleted);
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
|
||||||
|
//extra sanity checks here to avoid support calls about complete loss of data
|
||||||
|
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
|
||||||
|
Path root = new Path("/");
|
||||||
|
String touchfile = "/testRmNonEmptyRootDirNonRecursive";
|
||||||
|
Path file = new Path(touchfile);
|
||||||
|
ContractTestUtils.touch(getFileSystem(), file);
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
|
||||||
|
try {
|
||||||
|
boolean deleted = getFileSystem().delete(root, false);
|
||||||
|
fail("non recursive delete should have raised an exception," +
|
||||||
|
" but completed with exit code " + deleted);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
} finally {
|
||||||
|
getFileSystem().delete(file, false);
|
||||||
|
}
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRmRootRecursive() throws Throwable {
|
||||||
|
//extra sanity checks here to avoid support calls about complete loss of data
|
||||||
|
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
|
||||||
|
Path root = new Path("/");
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
|
||||||
|
Path file = new Path("/testRmRootRecursive");
|
||||||
|
ContractTestUtils.touch(getFileSystem(), file);
|
||||||
|
boolean deleted = getFileSystem().delete(root, true);
|
||||||
|
ContractTestUtils.assertIsDirectory(getFileSystem(), root);
|
||||||
|
LOG.info("rm -rf / result is {}", deleted);
|
||||||
|
if (deleted) {
|
||||||
|
assertPathDoesNotExist("expected file to be deleted", file);
|
||||||
|
} else {
|
||||||
|
assertPathExists("expected file to be preserved", file);;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testCreateFileOverRoot() throws Throwable {
|
||||||
|
Path root = new Path("/");
|
||||||
|
byte[] dataset = dataset(1024, ' ', 'z');
|
||||||
|
try {
|
||||||
|
createFile(getFileSystem(), root, false, dataset);
|
||||||
|
fail("expected an exception, got a file created over root: " + ls(root));
|
||||||
|
} catch (IOException e) {
|
||||||
|
//expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
}
|
||||||
|
assertIsDirectory(root);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,348 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||||
|
import org.apache.hadoop.fs.FSDataInputStream;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.apache.hadoop.io.IOUtils;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.EOFException;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Random;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test Seek operations
|
||||||
|
*/
|
||||||
|
public abstract class AbstractContractSeekTest extends AbstractFSContractTestBase {
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(AbstractContractSeekTest.class);
|
||||||
|
|
||||||
|
public static final int DEFAULT_RANDOM_SEEK_COUNT = 100;
|
||||||
|
|
||||||
|
private Path testPath;
|
||||||
|
private Path smallSeekFile;
|
||||||
|
private Path zeroByteFile;
|
||||||
|
private FSDataInputStream instream;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setup() throws Exception {
|
||||||
|
super.setup();
|
||||||
|
skipIfUnsupported(SUPPORTS_SEEK);
|
||||||
|
//delete the test directory
|
||||||
|
testPath = getContract().getTestPath();
|
||||||
|
smallSeekFile = path("seekfile.txt");
|
||||||
|
zeroByteFile = path("zero.txt");
|
||||||
|
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
|
||||||
|
//this file now has a simple rule: offset => value
|
||||||
|
createFile(getFileSystem(), smallSeekFile, false, block);
|
||||||
|
touch(getFileSystem(), zeroByteFile);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected Configuration createConfiguration() {
|
||||||
|
Configuration conf = super.createConfiguration();
|
||||||
|
conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
|
||||||
|
return conf;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void teardown() throws Exception {
|
||||||
|
IOUtils.closeStream(instream);
|
||||||
|
instream = null;
|
||||||
|
super.teardown();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSeekZeroByteFile() throws Throwable {
|
||||||
|
describe("seek and read a 0 byte file");
|
||||||
|
instream = getFileSystem().open(zeroByteFile);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
//expect initial read to fail
|
||||||
|
int result = instream.read();
|
||||||
|
assertMinusOne("initial byte read", result);
|
||||||
|
byte[] buffer = new byte[1];
|
||||||
|
//expect that seek to 0 works
|
||||||
|
instream.seek(0);
|
||||||
|
//re-read; expect the same -1 result
|
||||||
|
result = instream.read();
|
||||||
|
assertMinusOne("post-seek byte read", result);
|
||||||
|
result = instream.read(buffer, 0, 1);
|
||||||
|
assertMinusOne("post-seek buffer read", result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testBlockReadZeroByteFile() throws Throwable {
|
||||||
|
describe("do a block read on a 0 byte file");
|
||||||
|
instream = getFileSystem().open(zeroByteFile);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
//expect that seek to 0 works
|
||||||
|
byte[] buffer = new byte[1];
|
||||||
|
int result = instream.read(buffer, 0, 1);
|
||||||
|
assertMinusOne("block read zero byte file", result);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Seek and read on a closed file.
|
||||||
|
* Some filesystems let callers seek on a closed file -these must
|
||||||
|
* still fail on the subsequent reads.
|
||||||
|
* @throws Throwable
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testSeekReadClosedFile() throws Throwable {
|
||||||
|
boolean supportsSeekOnClosedFiles = isSupported(SUPPORTS_SEEK_ON_CLOSED_FILE);
|
||||||
|
|
||||||
|
instream = getFileSystem().open(smallSeekFile);
|
||||||
|
getLog().debug(
|
||||||
|
"Stream is of type " + instream.getClass().getCanonicalName());
|
||||||
|
instream.close();
|
||||||
|
try {
|
||||||
|
instream.seek(0);
|
||||||
|
if (!supportsSeekOnClosedFiles) {
|
||||||
|
fail("seek succeeded on a closed stream");
|
||||||
|
}
|
||||||
|
} catch (IOException e) {
|
||||||
|
//expected a closed file
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
int data = instream.available();
|
||||||
|
fail("read() succeeded on a closed stream, got " + data);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//expected a closed file
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
int data = instream.read();
|
||||||
|
fail("read() succeeded on a closed stream, got " + data);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//expected a closed file
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
byte[] buffer = new byte[1];
|
||||||
|
int result = instream.read(buffer, 0, 1);
|
||||||
|
fail("read(buffer, 0, 1) succeeded on a closed stream, got " + result);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//expected a closed file
|
||||||
|
}
|
||||||
|
//what position does a closed file have?
|
||||||
|
try {
|
||||||
|
long offset = instream.getPos();
|
||||||
|
} catch (IOException e) {
|
||||||
|
      // it's valid to raise an error here; the test only verifies that
      // no other exception (such as an NPE) is raised.
|
||||||
|
|
||||||
|
}
|
||||||
|
//and close again
|
||||||
|
instream.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testNegativeSeek() throws Throwable {
|
||||||
|
instream = getFileSystem().open(smallSeekFile);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
try {
|
||||||
|
instream.seek(-1);
|
||||||
|
long p = instream.getPos();
|
||||||
|
LOG.warn("Seek to -1 returned a position of " + p);
|
||||||
|
int result = instream.read();
|
||||||
|
fail(
|
||||||
|
"expected an exception, got data " + result + " at a position of " + p);
|
||||||
|
} catch (EOFException e) {
|
||||||
|
//bad seek -expected
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//bad seek -expected, but not as preferred as an EOFException
|
||||||
|
handleRelaxedException("a negative seek", "EOFException", e);
|
||||||
|
}
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSeekFile() throws Throwable {
|
||||||
|
describe("basic seek operations");
|
||||||
|
instream = getFileSystem().open(smallSeekFile);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
//expect that seek to 0 works
|
||||||
|
instream.seek(0);
|
||||||
|
int result = instream.read();
|
||||||
|
assertEquals(0, result);
|
||||||
|
assertEquals(1, instream.read());
|
||||||
|
assertEquals(2, instream.getPos());
|
||||||
|
assertEquals(2, instream.read());
|
||||||
|
assertEquals(3, instream.getPos());
|
||||||
|
instream.seek(128);
|
||||||
|
assertEquals(128, instream.getPos());
|
||||||
|
assertEquals(128, instream.read());
|
||||||
|
instream.seek(63);
|
||||||
|
assertEquals(63, instream.read());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSeekAndReadPastEndOfFile() throws Throwable {
|
||||||
|
describe("verify that reading past the last bytes in the file returns -1");
|
||||||
|
instream = getFileSystem().open(smallSeekFile);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
//expect that seek to 0 works
|
||||||
|
//go just before the end
|
||||||
|
instream.seek(TEST_FILE_LEN - 2);
|
||||||
|
assertTrue("Premature EOF", instream.read() != -1);
|
||||||
|
assertTrue("Premature EOF", instream.read() != -1);
|
||||||
|
assertMinusOne("read past end of file", instream.read());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
|
||||||
|
describe("do a seek past the EOF, then verify the stream recovers");
|
||||||
|
instream = getFileSystem().open(smallSeekFile);
|
||||||
|
    //go past the end of the file. This may or may not fail; the failure
    //may be delayed until the read
|
||||||
|
boolean canSeekPastEOF =
|
||||||
|
!getContract().isSupported(ContractOptions.REJECTS_SEEK_PAST_EOF, true);
|
||||||
|
try {
|
||||||
|
instream.seek(TEST_FILE_LEN + 1);
|
||||||
|
//if this doesn't trigger, then read() is expected to fail
|
||||||
|
assertMinusOne("read after seeking past EOF", instream.read());
|
||||||
|
} catch (EOFException e) {
|
||||||
|
//This is an error iff the FS claims to be able to seek past the EOF
|
||||||
|
if (canSeekPastEOF) {
|
||||||
|
//a failure wasn't expected
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
handleExpectedException(e);
|
||||||
|
} catch (IOException e) {
|
||||||
|
//This is an error iff the FS claims to be able to seek past the EOF
|
||||||
|
if (canSeekPastEOF) {
|
||||||
|
//a failure wasn't expected
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
handleRelaxedException("a seek past the end of the file",
|
||||||
|
"EOFException", e);
|
||||||
|
}
|
||||||
|
//now go back and try to read from a valid point in the file
|
||||||
|
instream.seek(1);
|
||||||
|
assertTrue("Premature EOF", instream.read() != -1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Seek round a file bigger than IO buffers
|
||||||
|
* @throws Throwable
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testSeekBigFile() throws Throwable {
|
||||||
|
describe("Seek round a large file and verify the bytes are what is expected");
|
||||||
|
Path testSeekFile = path("bigseekfile.txt");
|
||||||
|
byte[] block = dataset(65536, 0, 255);
|
||||||
|
createFile(getFileSystem(), testSeekFile, false, block);
|
||||||
|
instream = getFileSystem().open(testSeekFile);
|
||||||
|
assertEquals(0, instream.getPos());
|
||||||
|
//expect that seek to 0 works
|
||||||
|
instream.seek(0);
|
||||||
|
int result = instream.read();
|
||||||
|
assertEquals(0, result);
|
||||||
|
assertEquals(1, instream.read());
|
||||||
|
assertEquals(2, instream.read());
|
||||||
|
|
||||||
|
//do seek 32KB ahead
|
||||||
|
instream.seek(32768);
|
||||||
|
assertEquals("@32768", block[32768], (byte) instream.read());
|
||||||
|
instream.seek(40000);
|
||||||
|
assertEquals("@40000", block[40000], (byte) instream.read());
|
||||||
|
instream.seek(8191);
|
||||||
|
assertEquals("@8191", block[8191], (byte) instream.read());
|
||||||
|
instream.seek(0);
|
||||||
|
assertEquals("@0", 0, (byte) instream.read());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
|
||||||
|
describe(
|
||||||
|
"verify that a positioned read does not change the getPos() value");
|
||||||
|
Path testSeekFile = path("bigseekfile.txt");
|
||||||
|
byte[] block = dataset(65536, 0, 255);
|
||||||
|
createFile(getFileSystem(), testSeekFile, false, block);
|
||||||
|
instream = getFileSystem().open(testSeekFile);
|
||||||
|
instream.seek(39999);
|
||||||
|
assertTrue(-1 != instream.read());
|
||||||
|
assertEquals(40000, instream.getPos());
|
||||||
|
|
||||||
|
byte[] readBuffer = new byte[256];
|
||||||
|
instream.read(128, readBuffer, 0, readBuffer.length);
|
||||||
|
//have gone back
|
||||||
|
assertEquals(40000, instream.getPos());
|
||||||
|
//content is the same too
|
||||||
|
assertEquals("@40000", block[40000], (byte) instream.read());
|
||||||
|
//now verify the picked up data
|
||||||
|
for (int i = 0; i < 256; i++) {
|
||||||
|
assertEquals("@" + i, block[i + 128], readBuffer[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Lifted from TestLocalFileSystem:
|
||||||
|
* Regression test for HADOOP-9307: BufferedFSInputStream returning
|
||||||
|
* wrong results after certain sequences of seeks and reads.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testRandomSeeks() throws Throwable {
|
||||||
|
int limit = getContract().getLimit(TEST_RANDOM_SEEK_COUNT,
|
||||||
|
DEFAULT_RANDOM_SEEK_COUNT);
|
||||||
|
describe("Testing " + limit + " random seeks");
|
||||||
|
int filesize = 10 * 1024;
|
||||||
|
byte[] buf = dataset(filesize, 0, 255);
|
||||||
|
Path randomSeekFile = path("testrandomseeks.bin");
|
||||||
|
createFile(getFileSystem(), randomSeekFile, false, buf);
|
||||||
|
Random r = new Random();
|
||||||
|
FSDataInputStream stm = getFileSystem().open(randomSeekFile);
|
||||||
|
|
||||||
|
// Record the sequence of seeks and reads which trigger a failure.
|
||||||
|
int[] seeks = new int[10];
|
||||||
|
int[] reads = new int[10];
|
||||||
|
try {
|
||||||
|
for (int i = 0; i < limit; i++) {
|
||||||
|
int seekOff = r.nextInt(buf.length);
|
||||||
|
int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
|
||||||
|
|
||||||
|
seeks[i % seeks.length] = seekOff;
|
||||||
|
reads[i % reads.length] = toRead;
|
||||||
|
verifyRead(stm, buf, seekOff, toRead);
|
||||||
|
}
|
||||||
|
} catch (AssertionError afe) {
|
||||||
|
StringBuilder sb = new StringBuilder();
|
||||||
|
sb.append("Sequence of actions:\n");
|
||||||
|
for (int j = 0; j < seeks.length; j++) {
|
||||||
|
sb.append("seek @ ").append(seeks[j]).append(" ")
|
||||||
|
.append("read ").append(reads[j]).append("\n");
|
||||||
|
}
|
||||||
|
LOG.error(sb.toString());
|
||||||
|
throw afe;
|
||||||
|
} finally {
|
||||||
|
stm.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@@ -0,0 +1,201 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.conf.Configured;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.URI;
|
||||||
|
import java.net.URISyntaxException;
|
||||||
|
import java.net.URL;
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Class representing a filesystem contract that a filesystem
 * implementation is expected to implement.
 *
 * Part of this contract class is to allow FS implementations to
 * provide specific opt-outs and limits, so that tests can
 * skip unsupported features (e.g. case sensitivity tests) and
 * dangerous operations (e.g. trying to delete the root directory),
 * and limit filesize and other numeric variables for scale tests
 */
|
||||||
|
public abstract class AbstractFSContract extends Configured {
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(AbstractFSContract.class);
|
||||||
|
|
||||||
|
private boolean enabled = true;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Constructor: loads the authentication keys if found
|
||||||
|
* @param conf configuration to work with
|
||||||
|
*/
|
||||||
|
protected AbstractFSContract(Configuration conf) {
|
||||||
|
super(conf);
|
||||||
|
if (maybeAddConfResource(ContractOptions.CONTRACT_OPTIONS_RESOURCE)) {
|
||||||
|
LOG.debug("Loaded authentication keys from {}", ContractOptions.CONTRACT_OPTIONS_RESOURCE);
|
||||||
|
} else {
|
||||||
|
LOG.debug("Not loaded: {}", ContractOptions.CONTRACT_OPTIONS_RESOURCE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Any initialisation logic can go here
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public void init() throws IOException {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a configuration resource to this instance's configuration
|
||||||
|
* @param resource resource reference
|
||||||
|
* @throws AssertionError if the resource was not found.
|
||||||
|
*/
|
||||||
|
protected void addConfResource(String resource) {
|
||||||
|
boolean found = maybeAddConfResource(resource);
|
||||||
|
Assert.assertTrue("Resource not found " + resource, found);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a configuration resource to this instance's configuration,
|
||||||
|
* return true if the resource was found
|
||||||
|
* @param resource resource reference
|
||||||
|
*/
|
||||||
|
protected boolean maybeAddConfResource(String resource) {
|
||||||
|
URL url = this.getClass().getClassLoader().getResource(resource);
|
||||||
|
boolean found = url != null;
|
||||||
|
if (found) {
|
||||||
|
getConf().addResource(resource);
|
||||||
|
}
|
||||||
|
return found;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the FS from a URI. The default implementation just retrieves
|
||||||
|
 * it from the normal FileSystem factory/cache, with the local configuration
|
||||||
|
* @param uri URI of FS
|
||||||
|
* @return the filesystem
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public FileSystem getFileSystem(URI uri) throws IOException {
|
||||||
|
return FileSystem.get(uri, getConf());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the filesystem for these tests
|
||||||
|
* @return the test fs
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public abstract FileSystem getTestFileSystem() throws IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the scheme of this FS
|
||||||
|
* @return the scheme this FS supports
|
||||||
|
*/
|
||||||
|
public abstract String getScheme();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the path string for tests, e.g. <code>file:///tmp</code>
|
||||||
|
* @return a path in the test FS
|
||||||
|
*/
|
||||||
|
public abstract Path getTestPath();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Boolean to indicate whether or not the contract test are enabled
|
||||||
|
* for this test run.
|
||||||
|
* @return true if the tests can be run.
|
||||||
|
*/
|
||||||
|
public boolean isEnabled() {
|
||||||
|
return enabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Boolean to indicate whether or not the contract test are enabled
|
||||||
|
* for this test run.
|
||||||
|
* @param enabled flag which must be true if the tests can be run.
|
||||||
|
*/
|
||||||
|
public void setEnabled(boolean enabled) {
|
||||||
|
this.enabled = enabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Query for a feature being supported. This may include a probe for the feature
|
||||||
|
*
|
||||||
|
* @param feature feature to query
|
||||||
|
* @param defval default value
|
||||||
|
* @return true if the feature is supported
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public boolean isSupported(String feature, boolean defval) {
|
||||||
|
return getConf().getBoolean(getConfKey(feature), defval);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Query for a feature's limit. This may include a probe for the feature
|
||||||
|
*
|
||||||
|
* @param feature feature to query
|
||||||
|
* @param defval default value
|
||||||
|
 * @return the limit for the feature, or the default value if unset
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public int getLimit(String feature, int defval) {
|
||||||
|
return getConf().getInt(getConfKey(feature), defval);
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getOption(String feature, String defval) {
|
||||||
|
return getConf().get(getConfKey(feature), defval);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build a configuration key
|
||||||
|
* @param feature feature to query
|
||||||
|
* @return the configuration key base with the feature appended
|
||||||
|
*/
|
||||||
|
public String getConfKey(String feature) {
|
||||||
|
return ContractOptions.FS_CONTRACT_KEY + feature;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a URI off the scheme
|
||||||
|
* @param path path of URI
|
||||||
|
* @return a URI
|
||||||
|
* @throws IOException if the URI could not be created
|
||||||
|
*/
|
||||||
|
protected URI toURI(String path) throws IOException {
|
||||||
|
try {
|
||||||
|
return new URI(getScheme(),path, null);
|
||||||
|
} catch (URISyntaxException e) {
|
||||||
|
throw new IOException(e.toString() + " with " + path, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return "FSContract for " + getScheme();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
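A contract for a specific filesystem is expected to subclass AbstractFSContract
and supply the scheme, the test path and the test filesystem, usually loading an
FS-specific options resource as well. A minimal sketch under those assumptions;
the class name, resource name and test path below are hypothetical:

    // Hypothetical contract binding for the local filesystem.
    public class LocalFSContract extends AbstractFSContract {

      public LocalFSContract(Configuration conf) {
        super(conf);
        // load FS-specific contract options (hypothetical resource name)
        addConfResource("contract/localfs.xml");
      }

      @Override
      public String getScheme() {
        return "file";
      }

      @Override
      public FileSystem getTestFileSystem() throws IOException {
        return FileSystem.getLocal(getConf());
      }

      @Override
      public Path getTestPath() {
        return new Path("/tmp/testcontract");
      }
    }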
@@ -0,0 +1,363 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.FileStatus;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Rule;
|
||||||
|
import org.junit.internal.AssumptionViolatedException;
|
||||||
|
import org.junit.rules.Timeout;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.URI;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
|
||||||
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This is the base class for all the contract tests
|
||||||
|
*/
|
||||||
|
public abstract class AbstractFSContractTestBase extends Assert
|
||||||
|
implements ContractOptions {
|
||||||
|
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(AbstractFSContractTestBase.class);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Length of files to work with: {@value}
|
||||||
|
*/
|
||||||
|
public static final int TEST_FILE_LEN = 1024;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* standard test timeout: {@value}
|
||||||
|
*/
|
||||||
|
public static final int DEFAULT_TEST_TIMEOUT = 180 * 1000;
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * The FS contract used for these tests
|
||||||
|
*/
|
||||||
|
private AbstractFSContract contract;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The test filesystem extracted from it
|
||||||
|
*/
|
||||||
|
private FileSystem fileSystem;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The path for tests
|
||||||
|
*/
|
||||||
|
private Path testPath;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This must be implemented by all instantiated test cases
|
||||||
|
* -provide the FS contract
|
||||||
|
* @return the FS contract
|
||||||
|
*/
|
||||||
|
protected abstract AbstractFSContract createContract(Configuration conf);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the contract
|
||||||
|
* @return the contract, which will be non-null once the setup operation has
|
||||||
|
* succeeded
|
||||||
|
*/
|
||||||
|
protected AbstractFSContract getContract() {
|
||||||
|
return contract;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the filesystem created in startup
|
||||||
|
* @return the filesystem to use for tests
|
||||||
|
*/
|
||||||
|
public FileSystem getFileSystem() {
|
||||||
|
return fileSystem;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the log of the base class
|
||||||
|
* @return a logger
|
||||||
|
*/
|
||||||
|
public static Logger getLog() {
|
||||||
|
return LOG;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Skip a test if a feature is unsupported in this FS
|
||||||
|
* @param feature feature to look for
|
||||||
|
* @throws IOException IO problem
|
||||||
|
*/
|
||||||
|
protected void skipIfUnsupported(String feature) throws IOException {
|
||||||
|
if (!isSupported(feature)) {
|
||||||
|
skip("Skipping as unsupported feature: " + feature);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Is a feature supported?
|
||||||
|
* @param feature feature
|
||||||
|
* @return true iff the feature is supported
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
protected boolean isSupported(String feature) throws IOException {
|
||||||
|
return contract.isSupported(feature, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Include at the start of tests to skip them if the FS is not enabled.
|
||||||
|
*/
|
||||||
|
protected void assumeEnabled() {
|
||||||
|
if (!contract.isEnabled())
|
||||||
|
throw new AssumptionViolatedException("test cases disabled for " + contract);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a configuration. May be overridden by tests/instantiations
|
||||||
|
* @return a configuration
|
||||||
|
*/
|
||||||
|
protected Configuration createConfiguration() {
|
||||||
|
return new Configuration();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set the timeout for every test
|
||||||
|
*/
|
||||||
|
@Rule
|
||||||
|
public Timeout testTimeout = new Timeout(getTestTimeoutMillis());
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Option for tests to override the default timeout value
|
||||||
|
* @return the current test timeout
|
||||||
|
*/
|
||||||
|
protected int getTestTimeoutMillis() {
|
||||||
|
return DEFAULT_TEST_TIMEOUT;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Setup: create the contract then init it
|
||||||
|
* @throws Exception on any failure
|
||||||
|
*/
|
||||||
|
@Before
|
||||||
|
public void setup() throws Exception {
|
||||||
|
contract = createContract(createConfiguration());
|
||||||
|
contract.init();
|
||||||
|
//skip tests if they aren't enabled
|
||||||
|
assumeEnabled();
|
||||||
|
//extract the test FS
|
||||||
|
fileSystem = contract.getTestFileSystem();
|
||||||
|
assertNotNull("null filesystem", fileSystem);
|
||||||
|
URI fsURI = fileSystem.getUri();
|
||||||
|
LOG.info("Test filesystem = {} implemented by {}",
|
||||||
|
fsURI, fileSystem);
|
||||||
|
//sanity check to make sure that the test FS picked up really matches
|
||||||
|
//the scheme chosen. This is to avoid defaulting back to the localFS
|
||||||
|
//which would be drastic for root FS tests
|
||||||
|
assertEquals("wrong filesystem of " + fsURI,
|
||||||
|
contract.getScheme(), fsURI.getScheme());
|
||||||
|
//create the test path
|
||||||
|
testPath = getContract().getTestPath();
|
||||||
|
mkdirs(testPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Teardown
|
||||||
|
* @throws Exception on any failure
|
||||||
|
*/
|
||||||
|
@After
|
||||||
|
public void teardown() throws Exception {
|
||||||
|
deleteTestDirInTeardown();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete the test dir in the per-test teardown
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
protected void deleteTestDirInTeardown() throws IOException {
|
||||||
|
cleanup("TEARDOWN", getFileSystem(), testPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a path under the test path provided by
|
||||||
|
* the FS contract
|
||||||
|
* @param filepath path string in
|
||||||
|
* @return a path qualified by the test filesystem
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
protected Path path(String filepath) throws IOException {
|
||||||
|
return getFileSystem().makeQualified(
|
||||||
|
new Path(getContract().getTestPath(), filepath));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Take a simple path like "/something" and turn it into
|
||||||
|
* a qualified path against the test FS
|
||||||
|
* @param filepath path string in
|
||||||
|
* @return a path qualified by the test filesystem
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
protected Path absolutepath(String filepath) throws IOException {
|
||||||
|
return getFileSystem().makeQualified(new Path(filepath));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List a path in the test FS
|
||||||
|
* @param path path to list
|
||||||
|
* @return the contents of the path/dir
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
protected String ls(Path path) throws IOException {
|
||||||
|
return ContractTestUtils.ls(fileSystem, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Describe a test. This is a replacement for javadocs
|
||||||
|
* where the tests role is printed in the log output
|
||||||
|
* @param text description
|
||||||
|
*/
|
||||||
|
protected void describe(String text) {
|
||||||
|
LOG.info(text);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle the outcome of an operation not being the strictest
|
||||||
|
* exception desired, but one that, while still within the boundary
|
||||||
|
* of the contract, is a bit looser.
|
||||||
|
*
|
||||||
|
* If the FS contract says that they support the strictest exceptions,
|
||||||
|
* that is what they must return, and the exception here is rethrown
|
||||||
|
* @param action Action
|
||||||
|
* @param expectedException what was expected
|
||||||
|
* @param e exception that was received
|
||||||
|
*/
|
||||||
|
protected void handleRelaxedException(String action,
|
||||||
|
String expectedException,
|
||||||
|
Exception e) throws Exception {
|
||||||
|
if (getContract().isSupported(SUPPORTS_STRICT_EXCEPTIONS, false)) {
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
LOG.warn("The expected exception {} was not the exception class" +
|
||||||
|
" raised on {}: {}", action , e.getClass(), expectedException, e);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle expected exceptions through logging and/or other actions
|
||||||
|
* @param e exception raised.
|
||||||
|
*/
|
||||||
|
protected void handleExpectedException(Exception e) {
|
||||||
|
getLog().debug("expected :{}" ,e, e);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* assert that a path exists
|
||||||
|
* @param message message to use in an assertion
|
||||||
|
* @param path path to probe
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public void assertPathExists(String message, Path path) throws IOException {
|
||||||
|
ContractTestUtils.assertPathExists(fileSystem, message, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * assert that a path does not exist
|
||||||
|
* @param message message to use in an assertion
|
||||||
|
* @param path path to probe
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public void assertPathDoesNotExist(String message, Path path) throws
|
||||||
|
IOException {
|
||||||
|
ContractTestUtils.assertPathDoesNotExist(fileSystem, message, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Assert that a file exists and that its {@link FileStatus} entry
|
||||||
|
* declares that this is a file and not a symlink or directory.
|
||||||
|
*
|
||||||
|
* @param filename name of the file
|
||||||
|
* @throws IOException IO problems during file operations
|
||||||
|
*/
|
||||||
|
protected void assertIsFile(Path filename) throws IOException {
|
||||||
|
ContractTestUtils.assertIsFile(fileSystem, filename);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Assert that a path exists and that its {@link FileStatus} entry
 * declares that it is a directory and not a symlink or file.
 *
 * @param path name of the directory
|
||||||
|
* @throws IOException IO problems during file operations
|
||||||
|
*/
|
||||||
|
protected void assertIsDirectory(Path path) throws IOException {
|
||||||
|
ContractTestUtils.assertIsDirectory(fileSystem, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Create a directory, asserting that the mkdirs() call succeeded.
|
||||||
|
*
|
||||||
|
* @throws IOException IO problems during file operations
|
||||||
|
*/
|
||||||
|
protected void mkdirs(Path path) throws IOException {
|
||||||
|
assertTrue("Failed to mkdir " + path, fileSystem.mkdirs(path));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Assert that a delete succeeded
|
||||||
|
* @param path path to delete
|
||||||
|
* @param recursive recursive flag
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
protected void assertDeleted(Path path, boolean recursive) throws
|
||||||
|
IOException {
|
||||||
|
ContractTestUtils.assertDeleted(fileSystem, path, recursive);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Assert that the result value == -1, which implies
 * that the end of the stream was reached
|
||||||
|
* @param text text to include in a message (usually the operation)
|
||||||
|
* @param result read result to validate
|
||||||
|
*/
|
||||||
|
protected void assertMinusOne(String text, int result) {
|
||||||
|
assertEquals(text + " wrong read result " + result, -1, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
boolean rename(Path src, Path dst) throws IOException {
|
||||||
|
return getFileSystem().rename(src, dst);
|
||||||
|
}
|
||||||
|
|
||||||
|
protected String generateAndLogErrorListing(Path src, Path dst) throws
|
||||||
|
IOException {
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
getLog().error(
|
||||||
|
"src dir " + ContractTestUtils.ls(fs, src.getParent()));
|
||||||
|
String destDirLS = ContractTestUtils.ls(fs, dst.getParent());
|
||||||
|
if (fs.isDirectory(dst)) {
|
||||||
|
//include the dir into the listing
|
||||||
|
destDirLS = destDirLS + "\n" + ContractTestUtils.ls(fs, dst);
|
||||||
|
}
|
||||||
|
return destDirLS;
|
||||||
|
}
|
||||||
|
}
|
|
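Concrete test classes bind one of the abstract contract test suites to a
contract by overriding createContract(). A minimal sketch, reusing the
hypothetical LocalFSContract outlined earlier:

    // Hypothetical binding of the seek contract tests to a local FS contract.
    public class TestLocalFSContractSeek extends AbstractContractSeekTest {
      @Override
      protected AbstractFSContract createContract(Configuration conf) {
        return new LocalFSContract(conf);
      }
    }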
@@ -0,0 +1,170 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Options for contract tests: keys for FS-specific values,
|
||||||
|
* defaults.
|
||||||
|
*/
|
||||||
|
public interface ContractOptions {
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Name of the (optional) resource containing filesystem binding keys: {@value}
 * If found, it will be loaded.
|
||||||
|
*/
|
||||||
|
String CONTRACT_OPTIONS_RESOURCE = "contract-test-options.xml";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prefix for all contract keys in the configuration files
|
||||||
|
*/
|
||||||
|
String FS_CONTRACT_KEY = "fs.contract.";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Is a filesystem case sensitive.
|
||||||
|
* Some of the filesystems that say "no" here may mean
|
||||||
|
* that it varies from platform to platform -the localfs being the key
|
||||||
|
* example.
|
||||||
|
*/
|
||||||
|
String IS_CASE_SENSITIVE = "is-case-sensitive";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Blobstore flag. Implies it's not a real directory tree and
|
||||||
|
* consistency is below that which Hadoop expects
|
||||||
|
*/
|
||||||
|
String IS_BLOBSTORE = "is-blobstore";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that the FS can rename into directories that
|
||||||
|
* don't exist, creating them as needed.
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String RENAME_CREATES_DEST_DIRS = "rename-creates-dest-dirs";
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Flag to indicate that a rename overwrites an existing destination
 * rather than failing the operation.
 * {@value}
|
||||||
|
*/
|
||||||
|
String RENAME_OVERWRITES_DEST = "rename-overwrites-dest";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that the FS returns false if the destination exists
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String RENAME_RETURNS_FALSE_IF_DEST_EXISTS =
|
||||||
|
"rename-returns-false-if-dest-exists";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that the FS returns false on a rename
|
||||||
|
* if the source is missing
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String RENAME_RETURNS_FALSE_IF_SOURCE_MISSING =
|
||||||
|
"rename-returns-false-if-source-missing";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that append is supported
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_APPEND = "supports-append";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that renames are atomic
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_ATOMIC_RENAME = "supports-atomic-rename";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that directory deletes are atomic
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_ATOMIC_DIRECTORY_DELETE = "supports-atomic-directory-delete";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Does the FS support multiple block locations?
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_BLOCK_LOCALITY = "supports-block-locality";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Does the FS support the concat() operation?
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_CONCAT = "supports-concat";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Is seeking supported at all?
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_SEEK = "supports-seek";
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Is a seek past the EOF rejected?
 * {@value}
|
||||||
|
*/
|
||||||
|
String REJECTS_SEEK_PAST_EOF = "rejects-seek-past-eof";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Is seeking on a closed file supported? Some filesystems only raise an
|
||||||
|
* exception later, when trying to read.
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_SEEK_ON_CLOSED_FILE = "supports-seek-on-closed-file";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that this FS expects to throw the strictest
|
||||||
|
* exceptions it can, not generic IOEs, which, if returned,
|
||||||
|
* must be rejected.
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_STRICT_EXCEPTIONS = "supports-strict-exceptions";
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Are unix permissions supported?
 * {@value}
|
||||||
|
*/
|
||||||
|
String SUPPORTS_UNIX_PERMISSIONS = "supports-unix-permissions";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maximum path length
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String MAX_PATH_ = "max-path";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maximum filesize: 0 or -1 for no limit
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String MAX_FILESIZE = "max-filesize";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Flag to indicate that tests on the root directories of a filesystem/
|
||||||
|
* object store are permitted
|
||||||
|
* @{value}
|
||||||
|
*/
|
||||||
|
String TEST_ROOT_TESTS_ENABLED = "test.root-tests-enabled";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Limit for #of random seeks to perform.
|
||||||
|
* Keep low for remote filesystems for faster tests
|
||||||
|
*/
|
||||||
|
String TEST_RANDOM_SEEK_COUNT = "test.random-seek-count";
|
||||||
|
|
||||||
|
}
|
|
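Each option above is read through AbstractFSContract.getConfKey(), which
prefixes the feature name with FS_CONTRACT_KEY. A minimal sketch of how a
lookup resolves; the values shown are illustrative only:

    // Illustrative: how a contract option maps to a configuration key.
    Configuration conf = new Configuration();
    conf.setBoolean("fs.contract." + ContractOptions.SUPPORTS_SEEK, true);
    // AbstractFSContract.isSupported(SUPPORTS_SEEK, false) then reads the
    // boolean at "fs.contract.supports-seek" and returns true.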
@@ -0,0 +1,759 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.FSDataInputStream;
|
||||||
|
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||||
|
import org.apache.hadoop.fs.FileStatus;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.internal.AssumptionViolatedException;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.EOFException;
|
||||||
|
import java.io.FileNotFoundException;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.Properties;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Utilities used across test cases
|
||||||
|
*/
|
||||||
|
public class ContractTestUtils extends Assert {
|
||||||
|
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(ContractTestUtils.class);
|
||||||
|
|
||||||
|
public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Assert that a property in the property set matches the expected value
|
||||||
|
* @param props property set
|
||||||
|
* @param key property name
|
||||||
|
* @param expected expected value. If null, the property must not be in the set
|
||||||
|
*/
|
||||||
|
public static void assertPropertyEquals(Properties props,
|
||||||
|
String key,
|
||||||
|
String expected) {
|
||||||
|
String val = props.getProperty(key);
|
||||||
|
if (expected == null) {
|
||||||
|
assertNull("Non null property " + key + " = " + val, val);
|
||||||
|
} else {
|
||||||
|
assertEquals("property " + key + " = " + val,
|
||||||
|
expected,
|
||||||
|
val);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* Write a file and read it in, validating the result. Optional flags control
|
||||||
|
* whether file overwrite operations should be enabled, and whether the
|
||||||
|
* file should be deleted afterwards.
|
||||||
|
*
|
||||||
|
* If there is a mismatch between what was written and what was expected,
|
||||||
|
* a small range of bytes either side of the first error are logged to aid
|
||||||
|
* diagnosing what problem occurred -whether it was a previous file
|
||||||
|
* or a corrupting of the current file. This assumes that two
|
||||||
|
* sequential runs to the same path use datasets with different character
|
||||||
|
* moduli.
|
||||||
|
*
|
||||||
|
* @param fs filesystem
|
||||||
|
* @param path path to write to
|
||||||
|
* @param len length of data
|
||||||
|
* @param overwrite should the create option allow overwrites?
|
||||||
|
* @param delete should the file be deleted afterwards? -with a verification
|
||||||
|
* that it worked. Deletion is not attempted if an assertion has failed
|
||||||
|
* earlier -it is not in a <code>finally{}</code> block.
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public static void writeAndRead(FileSystem fs,
|
||||||
|
Path path,
|
||||||
|
byte[] src,
|
||||||
|
int len,
|
||||||
|
int blocksize,
|
||||||
|
boolean overwrite,
|
||||||
|
boolean delete) throws IOException {
|
||||||
|
fs.mkdirs(path.getParent());
|
||||||
|
|
||||||
|
writeDataset(fs, path, src, len, blocksize, overwrite);
|
||||||
|
|
||||||
|
byte[] dest = readDataset(fs, path, len);
|
||||||
|
|
||||||
|
compareByteArrays(src, dest, len);
|
||||||
|
|
||||||
|
if (delete) {
|
||||||
|
rejectRootOperation(path);
|
||||||
|
boolean deleted = fs.delete(path, false);
|
||||||
|
assertTrue("Deleted", deleted);
|
||||||
|
assertPathDoesNotExist(fs, "Cleanup failed", path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Write a file.
|
||||||
|
* Optional flags control
|
||||||
|
* whether file overwrite operations should be enabled
|
||||||
|
* @param fs filesystem
|
||||||
|
* @param path path to write to
|
||||||
|
* @param len length of data
|
||||||
|
* @param overwrite should the create option allow overwrites?
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public static void writeDataset(FileSystem fs,
|
||||||
|
Path path,
|
||||||
|
byte[] src,
|
||||||
|
int len,
|
||||||
|
int buffersize,
|
||||||
|
boolean overwrite) throws IOException {
|
||||||
|
assertTrue(
|
||||||
|
"Not enough data in source array to write " + len + " bytes",
|
||||||
|
src.length >= len);
|
||||||
|
FSDataOutputStream out = fs.create(path,
|
||||||
|
overwrite,
|
||||||
|
fs.getConf()
|
||||||
|
.getInt(IO_FILE_BUFFER_SIZE,
|
||||||
|
4096),
|
||||||
|
(short) 1,
|
||||||
|
buffersize);
|
||||||
|
out.write(src, 0, len);
|
||||||
|
out.close();
|
||||||
|
assertFileHasLength(fs, path, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read the file and convert to a byte dataset.
|
||||||
|
* This implements readfully internally, so that it will read
|
||||||
|
* in the file without ever having to seek()
|
||||||
|
* @param fs filesystem
|
||||||
|
* @param path path to read from
|
||||||
|
* @param len length of data to read
|
||||||
|
* @return the bytes
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public static byte[] readDataset(FileSystem fs, Path path, int len)
|
||||||
|
throws IOException {
|
||||||
|
FSDataInputStream in = fs.open(path);
|
||||||
|
byte[] dest = new byte[len];
|
||||||
|
int offset =0;
|
||||||
|
int nread = 0;
|
||||||
|
try {
|
||||||
|
while (nread < len) {
|
||||||
|
int nbytes = in.read(dest, offset + nread, len - nread);
|
||||||
|
if (nbytes < 0) {
|
||||||
|
throw new EOFException("End of file reached before reading fully.");
|
||||||
|
}
|
||||||
|
nread += nbytes;
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
in.close();
|
||||||
|
}
|
||||||
|
return dest;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read a file, verify its length and contents match the expected array
|
||||||
|
* @param fs filesystem
|
||||||
|
* @param path path to file
|
||||||
|
* @param original original dataset
|
||||||
|
* @throws IOException IO Problems
|
||||||
|
*/
|
||||||
|
public static void verifyFileContents(FileSystem fs,
|
||||||
|
Path path,
|
||||||
|
byte[] original) throws IOException {
|
||||||
|
FileStatus stat = fs.getFileStatus(path);
|
||||||
|
String statText = stat.toString();
|
||||||
|
assertTrue("not a file " + statText, stat.isFile());
|
||||||
|
assertEquals("wrong length " + statText, original.length, stat.getLen());
|
||||||
|
byte[] bytes = readDataset(fs, path, original.length);
|
||||||
|
compareByteArrays(original,bytes,original.length);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify that the read at a specific offset in a stream
|
||||||
|
* matches that expected
|
||||||
|
* @param stm stream
|
||||||
|
* @param fileContents original file contents
|
||||||
|
* @param seekOff seek offset
|
||||||
|
* @param toRead number of bytes to read
|
||||||
|
* @throws IOException IO problems
|
||||||
|
*/
|
||||||
|
public static void verifyRead(FSDataInputStream stm, byte[] fileContents,
|
||||||
|
int seekOff, int toRead) throws IOException {
|
||||||
|
byte[] out = new byte[toRead];
|
||||||
|
stm.seek(seekOff);
|
||||||
|
stm.readFully(out);
|
||||||
|
byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
|
||||||
|
seekOff + toRead);
|
||||||
|
compareByteArrays(expected, out,toRead);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Assert that the array original[0..len] and received[] are equal.
|
||||||
|
* A failure triggers the logging of the bytes near where the first
|
||||||
|
* difference surfaces.
|
||||||
|
* @param original source data
|
||||||
|
* @param received actual
|
||||||
|
* @param len length of bytes to compare
|
||||||
|
*/
|
||||||
|
public static void compareByteArrays(byte[] original,
|
||||||
|
byte[] received,
|
||||||
|
int len) {
|
||||||
|
assertEquals("Number of bytes read != number written",
|
||||||
|
len, received.length);
|
||||||
|
int errors = 0;
|
||||||
|
int first_error_byte = -1;
|
||||||
|
for (int i = 0; i < len; i++) {
|
||||||
|
if (original[i] != received[i]) {
|
||||||
|
if (errors == 0) {
|
||||||
|
first_error_byte = i;
|
||||||
|
}
|
||||||
|
errors++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (errors > 0) {
|
||||||
|
String message = String.format(" %d errors in file of length %d",
|
||||||
|
errors, len);
|
||||||
|
LOG.warn(message);
|
||||||
|
// the range either side of the first error to print
|
||||||
|
// this is a purely arbitrary number, to aid user debugging
|
||||||
|
final int overlap = 10;
|
||||||
|
for (int i = Math.max(0, first_error_byte - overlap);
|
||||||
|
i < Math.min(first_error_byte + overlap, len);
|
||||||
|
i++) {
|
||||||
|
byte actual = received[i];
|
||||||
|
byte expected = original[i];
|
||||||
|
String letter = toChar(actual);
|
||||||
|
String line = String.format("[%04d] %2x %s\n", i, actual, letter);
|
||||||
|
if (expected != actual) {
|
||||||
|
line = String.format("[%04d] %2x %s -expected %2x %s\n",
|
||||||
|
i,
|
||||||
|
actual,
|
||||||
|
letter,
|
||||||
|
expected,
|
||||||
|
toChar(expected));
|
||||||
|
}
|
||||||
|
LOG.warn(line);
|
||||||
|
}
|
||||||
|
fail(message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a byte to a character for printing. If the
|
||||||
|
* byte value is < 32 -and hence unprintable- the byte is
|
||||||
|
* returned as a two digit hex value
|
||||||
|
* @param b byte
|
||||||
|
* @return the printable character string
|
||||||
|
*/
|
||||||
|
public static String toChar(byte b) {
|
||||||
|
if (b >= 0x20) {
|
||||||
|
return Character.toString((char) b);
|
||||||
|
} else {
|
||||||
|
return String.format("%02x", b);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert a buffer to a string, character by character
|
||||||
|
* @param buffer input bytes
|
||||||
|
* @return a string conversion
|
||||||
|
*/
|
||||||
|
public static String toChar(byte[] buffer) {
|
||||||
|
StringBuilder builder = new StringBuilder(buffer.length);
|
||||||
|
for (byte b : buffer) {
|
||||||
|
builder.append(toChar(b));
|
||||||
|
}
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static byte[] toAsciiByteArray(String s) {
|
||||||
|
char[] chars = s.toCharArray();
|
||||||
|
int len = chars.length;
|
||||||
|
byte[] buffer = new byte[len];
|
||||||
|
for (int i = 0; i < len; i++) {
|
||||||
|
buffer[i] = (byte) (chars[i] & 0xff);
|
||||||
|
}
|
||||||
|
return buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cleanup at the end of a test run
|
||||||
|
* @param action action triggering the operation (for use in logging)
|
||||||
|
* @param fileSystem filesystem to work with. May be null
|
||||||
|
* @param cleanupPath path to delete as a string
|
||||||
|
*/
|
||||||
|
public static void cleanup(String action,
|
||||||
|
FileSystem fileSystem,
|
||||||
|
String cleanupPath) {
|
||||||
|
if (fileSystem == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Path path = new Path(cleanupPath).makeQualified(fileSystem.getUri(),
|
||||||
|
fileSystem.getWorkingDirectory());
|
||||||
|
cleanup(action, fileSystem, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cleanup at the end of a test run
|
||||||
|
* @param action action triggering the operation (for use in logging)
|
||||||
|
* @param fileSystem filesystem to work with. May be null
|
||||||
|
* @param path path to delete
|
||||||
|
*/
|
||||||
|
public static void cleanup(String action, FileSystem fileSystem, Path path) {
|
||||||
|
noteAction(action);
|
||||||
|
try {
|
||||||
|
rm(fileSystem, path, true, false);
|
||||||
|
} catch (Exception e) {
|
||||||
|
LOG.error("Error deleting in "+ action + " - " + path + ": " + e, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete a directory. There's a safety check for operations against the
|
||||||
|
* root directory -these are intercepted and rejected with an IOException
|
||||||
|
* unless the allowRootDelete flag is true
|
||||||
|
* @param fileSystem filesystem to work with. May be null
|
||||||
|
* @param path path to delete
|
||||||
|
* @param recursive flag to enable recursive delete
|
||||||
|
* @param allowRootDelete can the root directory be deleted?
|
||||||
|
* @throws IOException on any problem.
|
||||||
|
*/
|
||||||
|
public static boolean rm(FileSystem fileSystem,
|
||||||
|
Path path,
|
||||||
|
boolean recursive,
|
||||||
|
boolean allowRootDelete) throws
|
||||||
|
IOException {
|
||||||
|
if (fileSystem != null) {
|
||||||
|
rejectRootOperation(path, allowRootDelete);
|
||||||
|
if (fileSystem.exists(path)) {
|
||||||
|
return fileSystem.delete(path, recursive);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Block any operation on the root path. This is a safety check
|
||||||
|
* @param path path in the filesystem
|
||||||
|
* @param allowRootOperation can the root directory be manipulated?
|
||||||
|
* @throws IOException if the operation was rejected
|
||||||
|
*/
|
||||||
|
public static void rejectRootOperation(Path path,
|
||||||
|
boolean allowRootOperation) throws IOException {
|
||||||
|
if (path.isRoot() && !allowRootOperation) {
|
||||||
|
throw new IOException("Root directory operation rejected: " + path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Block any operation on the root path. This is a safety check
|
||||||
|
* @param path path in the filesystem
|
||||||
|
* @throws IOException if the operation was rejected
|
||||||
|
*/
|
||||||
|
public static void rejectRootOperation(Path path) throws IOException {
|
||||||
|
rejectRootOperation(path, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
public static void noteAction(String action) {
|
||||||
|
if (LOG.isDebugEnabled()) {
|
||||||
|
LOG.debug("============== "+ action +" =============");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Downgrade a failure to a message and a warning, then an
 * AssumptionViolatedException for the JUnit runner to mark the test as skipped
|
||||||
|
* @param message text message
|
||||||
|
* @param failure what failed
|
||||||
|
* @throws AssumptionViolatedException always
|
||||||
|
*/
|
||||||
|
public static void downgrade(String message, Throwable failure) {
|
||||||
|
LOG.warn("Downgrading test " + message, failure);
|
||||||
|
AssumptionViolatedException ave =
|
||||||
|
new AssumptionViolatedException(failure, null);
|
||||||
|
throw ave;
|
||||||
|
}
|
||||||
|
|
||||||
|
  /**
   * report an overridden test as unsupported
   * @param message message to use in the text
   * @throws AssumptionViolatedException always
   */
  public static void unsupported(String message) {
    skip(message);
  }

  /**
   * report a test has been skipped for some reason
   * @param message message to use in the text
   * @throws AssumptionViolatedException always
   */
  public static void skip(String message) {
    LOG.info("Skipping: {}", message);
    throw new AssumptionViolatedException(message);
  }

  /**
   * Fail with an exception that was received
   * @param text text to use in the exception
   * @param thrown a (possibly null) throwable to init the cause with
   * @throws AssertionError with the text and throwable -always
   */
  public static void fail(String text, Throwable thrown) {
    AssertionError e = new AssertionError(text);
    e.initCause(thrown);
    throw e;
  }

  /**
   * Make an assertion about the length of a file
   * @param fs filesystem
   * @param path path of the file
   * @param expected expected length
   * @throws IOException on File IO problems
   */
  public static void assertFileHasLength(FileSystem fs, Path path,
      int expected) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    assertEquals(
        "Wrong file length of file " + path + " status: " + status,
        expected,
        status.getLen());
  }

  /**
   * Assert that a path refers to a directory
   * @param fs filesystem
   * @param path path of the directory
   * @throws IOException on File IO problems
   */
  public static void assertIsDirectory(FileSystem fs,
      Path path) throws IOException {
    FileStatus fileStatus = fs.getFileStatus(path);
    assertIsDirectory(fileStatus);
  }

  /**
   * Assert that a path refers to a directory
   * @param fileStatus stats to check
   */
  public static void assertIsDirectory(FileStatus fileStatus) {
    assertTrue("Should be a directory - but isn't: " + fileStatus,
        fileStatus.isDirectory());
  }
  /**
   * Write the text to a file, returning the converted byte array
   * for use in validating the round trip.
   * @param fs filesystem
   * @param path path of file
   * @param text text to write
   * @param overwrite should the operation overwrite any existing file?
   * @return the bytes written to the file
   * @throws IOException on IO problems
   */
  public static byte[] writeTextFile(FileSystem fs,
      Path path,
      String text,
      boolean overwrite) throws IOException {
    byte[] bytes = new byte[0];
    if (text != null) {
      bytes = toAsciiByteArray(text);
    }
    createFile(fs, path, overwrite, bytes);
    return bytes;
  }

  /**
   * Create a file
   * @param fs filesystem
   * @param path path to write
   * @param overwrite overwrite flag
   * @param data source dataset. Can be null
   * @throws IOException on any problem
   */
  public static void createFile(FileSystem fs,
      Path path,
      boolean overwrite,
      byte[] data) throws IOException {
    FSDataOutputStream stream = fs.create(path, overwrite);
    if (data != null && data.length > 0) {
      stream.write(data);
    }
    stream.close();
  }

  /**
   * Touch a file
   * @param fs filesystem
   * @param path path
   * @throws IOException IO problems
   */
  public static void touch(FileSystem fs,
      Path path) throws IOException {
    createFile(fs, path, true, null);
  }

  /**
   * Delete a file/dir and assert that delete() returned true
   * <i>and</i> that the path no longer exists. This variant rejects
   * all operations on root directories.
   * @param fs filesystem
   * @param file path to delete
   * @param recursive flag to enable recursive delete
   * @throws IOException IO problems
   */
  public static void assertDeleted(FileSystem fs,
      Path file,
      boolean recursive) throws IOException {
    assertDeleted(fs, file, recursive, false);
  }
  /**
   * Delete a file/dir and assert that delete() returned true
   * <i>and</i> that the path no longer exists. This variant rejects
   * operations on root directories unless allowRootOperations is true.
   * @param fs filesystem
   * @param file path to delete
   * @param recursive flag to enable recursive delete
   * @param allowRootOperations can the root dir be deleted?
   * @throws IOException IO problems
   */
  public static void assertDeleted(FileSystem fs,
      Path file,
      boolean recursive,
      boolean allowRootOperations) throws IOException {
    rejectRootOperation(file, allowRootOperations);
    assertPathExists(fs, "about to be deleted file", file);
    boolean deleted = fs.delete(file, recursive);
    String dir = ls(fs, file.getParent());
    assertTrue("Delete failed on " + file + ": " + dir, deleted);
    assertPathDoesNotExist(fs, "Deleted file", file);
  }

  /**
   * Read in "length" bytes, convert to an ASCII string.
   * @param fs filesystem
   * @param path path to read
   * @param length #of bytes to read.
   * @return the bytes read and converted to a string
   * @throws IOException IO problems
   */
  public static String readBytesToString(FileSystem fs,
      Path path,
      int length) throws IOException {
    FSDataInputStream in = fs.open(path);
    try {
      byte[] buf = new byte[length];
      in.readFully(0, buf);
      return toChar(buf);
    } finally {
      in.close();
    }
  }

  /**
   * Take an array of filestats and convert to a string
   * (each entry prefixed with a [%02d] counter).
   * @param stats array of stats
   * @param separator separator after every entry
   * @return a stringified set
   */
  public static String fileStatsToString(FileStatus[] stats, String separator) {
    StringBuilder buf = new StringBuilder(stats.length * 128);
    for (int i = 0; i < stats.length; i++) {
      buf.append(String.format("[%02d] %s", i, stats[i])).append(separator);
    }
    return buf.toString();
  }

  /**
   * List a directory.
   * @param fileSystem FS
   * @param path path
   * @return a directory listing or failure message
   * @throws IOException IO problems
   */
  public static String ls(FileSystem fileSystem, Path path) throws IOException {
    if (path == null) {
      // surfaces when someone calls getParent() on something at the top of the path
      return "/";
    }
    FileStatus[] stats;
    String pathtext = "ls " + path;
    try {
      stats = fileSystem.listStatus(path);
    } catch (FileNotFoundException e) {
      return pathtext + " -file not found";
    } catch (IOException e) {
      return pathtext + " -failed: " + e;
    }
    return dumpStats(pathtext, stats);
  }
  public static String dumpStats(String pathname, FileStatus[] stats) {
    return pathname + fileStatsToString(stats, "\n");
  }

  /**
   * Assert that a file exists and that its {@link FileStatus} entry
   * declares that this is a file and not a symlink or directory.
   * @param fileSystem filesystem to resolve path against
   * @param filename name of the file
   * @throws IOException IO problems during file operations
   */
  public static void assertIsFile(FileSystem fileSystem, Path filename)
      throws IOException {
    assertPathExists(fileSystem, "Expected file", filename);
    FileStatus status = fileSystem.getFileStatus(filename);
    assertIsFile(filename, status);
  }

  /**
   * Assert that a {@link FileStatus} entry declares that the path it
   * refers to is a file and not a symlink or directory.
   * @param filename name of the file
   * @param status file status
   */
  public static void assertIsFile(Path filename, FileStatus status) {
    String fileInfo = filename + " " + status;
    assertFalse("File claims to be a directory " + fileInfo,
        status.isDirectory());
    assertFalse("File claims to be a symlink " + fileInfo,
        status.isSymlink());
  }

  /**
   * Create a dataset for use in the tests; all data is in the range
   * base to (base+modulo-1) inclusive.
   * @param len length of data
   * @param base base of the data
   * @param modulo the modulo
   * @return the newly generated dataset
   */
  public static byte[] dataset(int len, int base, int modulo) {
    byte[] dataset = new byte[len];
    for (int i = 0; i < len; i++) {
      dataset[i] = (byte) (base + (i % modulo));
    }
    return dataset;
  }

  /**
   * Assert that a path exists -but make no assertions as to the
   * type of that entry.
   *
   * @param fileSystem filesystem to examine
   * @param message message to include in the assertion failure message
   * @param path path in the filesystem
   * @throws FileNotFoundException raised if the path is missing
   * @throws IOException IO problems
   */
  public static void assertPathExists(FileSystem fileSystem, String message,
      Path path) throws IOException {
    if (!fileSystem.exists(path)) {
      // failure: log the parent directory listing, then report it
      ls(fileSystem, path.getParent());
      throw new FileNotFoundException(message + ": not found " + path
          + " in " + path.getParent());
    }
  }

  /**
   * Assert that a path does not exist.
   *
   * @param fileSystem filesystem to examine
   * @param message message to include in the assertion failure message
   * @param path path in the filesystem
   * @throws IOException IO problems
   */
  public static void assertPathDoesNotExist(FileSystem fileSystem,
      String message,
      Path path) throws IOException {
    try {
      FileStatus status = fileSystem.getFileStatus(path);
      fail(message + ": unexpectedly found " + path + " as " + status);
    } catch (FileNotFoundException expected) {
      // this is expected
    }
  }
  /**
   * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry.
   * @param fs filesystem
   * @param dir directory to scan
   * @param subdir full path to look for
   * @throws IOException IO problems
   */
  public static void assertListStatusFinds(FileSystem fs,
      Path dir,
      Path subdir) throws IOException {
    FileStatus[] stats = fs.listStatus(dir);
    boolean found = false;
    StringBuilder builder = new StringBuilder();
    for (FileStatus stat : stats) {
      builder.append(stat.toString()).append('\n');
      if (stat.getPath().equals(subdir)) {
        found = true;
      }
    }
    assertTrue("Path " + subdir
        + " not found in directory " + dir + ":" + builder,
        found);
  }

  /**
   * Test for the host being an OSX machine.
   * @return true if the JVM thinks that it is running on OSX
   */
  public static boolean isOSX() {
    return System.getProperty("os.name").contains("OS X");
  }

  /**
   * Compare the content of a concatenated file against the original
   * source arrays, byte for byte.
   * @param concat the concatenated file contents
   * @param bytes the individual byte arrays that were concatenated
   */
  public static void validateFileContent(byte[] concat, byte[][] bytes) {
    int idx = 0;
    boolean mismatch = false;

    for (byte[] bb : bytes) {
      for (byte b : bb) {
        if (b != concat[idx++]) {
          mismatch = true;
          break;
        }
      }
      if (mismatch) {
        break;
      }
    }
    assertFalse("File content of file is not as expected at offset " + idx,
        mismatch);
  }

}
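
As a rough, illustrative sketch (not part of this patch) of how these helpers
are meant to compose, a contract test case might exercise a filesystem along
the following lines; the "fs" and "testPath" variables and the method name are
hypothetical stand-ins for whatever the concrete test base class provides:

  // Hypothetical usage sketch of the ContractTestUtils helpers above.
  public void exampleRoundTrip(FileSystem fs, Path testPath) throws IOException {
    byte[] data = ContractTestUtils.dataset(1024, 'a', 26);      // bytes cycling 'a'..'z'
    Path testFile = new Path(testPath, "example.txt");
    ContractTestUtils.createFile(fs, testFile, true, data);      // write, overwriting any old file
    ContractTestUtils.assertIsFile(fs, testFile);                // a plain file, not a dir or symlink
    ContractTestUtils.assertFileHasLength(fs, testFile, data.length);
    ContractTestUtils.assertDeleted(fs, testFile, false);        // delete and verify it is gone
    ContractTestUtils.cleanup("teardown", fs, testPath);         // logs and swallows any failure
  }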
@ -0,0 +1,63 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.ftp;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
import org.junit.Assert;

import java.net.URI;

import static org.junit.Assert.assertNotNull;

/**
 * The contract of FTP; requires the option "test.ftp.testdir" to be set
 */
public class FTPContract extends AbstractBondedFSContract {

  public static final String CONTRACT_XML = "contract/ftp.xml";
  /**
   * Name of the option defining the FTP test directory.
   */
  public static final String TEST_FS_TESTDIR = "test.ftp.testdir";
  private String fsName;
  private URI fsURI;
  private FileSystem fs;

  public FTPContract(Configuration conf) {
    super(conf);
    //insert the base features
    addConfResource(CONTRACT_XML);
  }

  @Override
  public String getScheme() {
    return "ftp";
  }

  @Override
  public Path getTestPath() {
    String pathString = getOption(TEST_FS_TESTDIR, null);
    assertNotNull("Undefined test option " + TEST_FS_TESTDIR, pathString);
    Path path = new Path(pathString);
    return path;
  }
}
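
For orientation only (again not part of this patch), the contract above is
bound by the test classes that follow roughly as sketched here; the "conf"
parameter and the method name are hypothetical:

  // Hypothetical sketch: binding the FTP contract and resolving its test path.
  public Path bindAndResolveTestPath(Configuration conf) throws IOException {
    FTPContract contract = new FTPContract(conf);  // pulls in the contract/ftp.xml options
    contract.init();                               // binds to the configured FTP filesystem
    return contract.getTestPath();                 // asserts that test.ftp.testdir is set
  }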
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.ftp;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestFTPContractCreate extends AbstractContractCreateTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new FTPContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.ftp;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestFTPContractDelete extends AbstractContractDeleteTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new FTPContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,34 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.ftp;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Test dir operations on the FTP filesystem.
|
||||||
|
*/
|
||||||
|
public class TestFTPContractMkdir extends AbstractContractMkdirTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new FTPContract(conf);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.ftp;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestFTPContractOpen extends AbstractContractOpenTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new FTPContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,66 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.ftp;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
import org.apache.hadoop.fs.ftp.FTPFileSystem;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
public class TestFTPContractRename extends AbstractContractRenameTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new FTPContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Check the exception was about cross-directory renames;
 * if not, rethrow it.
|
||||||
|
* @param e exception raised
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
private void verifyUnsupportedDirRenameException(IOException e) throws IOException {
|
||||||
|
if (!e.toString().contains(FTPFileSystem.E_SAME_DIRECTORY_ONLY)) {
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void testRenameDirIntoExistingDir() throws Throwable {
|
||||||
|
try {
|
||||||
|
super.testRenameDirIntoExistingDir();
|
||||||
|
fail("Expected a failure");
|
||||||
|
} catch (IOException e) {
|
||||||
|
verifyUnsupportedDirRenameException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void testRenameFileNonexistentDir() throws Throwable {
|
||||||
|
try {
|
||||||
|
super.testRenameFileNonexistentDir();
|
||||||
|
fail("Expected a failure");
|
||||||
|
} catch (IOException e) {
|
||||||
|
verifyUnsupportedDirRenameException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,56 @@
<!DOCTYPE html>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements. See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership. The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<html>
<head>
  <title>FTP Contract Tests</title>
</head>
<body>
<h1>FTP Contract</h1>

This package contains tests that verify the FTP filesystem works close to what
a Hadoop application expects.
<p></p>
All these tests are skipped unless a test filesystem is provided
in <code>hadoop-common/src/test/resources/core-site.xml</code>
<pre>

  <property>
    <name>fs.ftp.contract.test.fs.name</name>
    <value>ftp://ftpserver/</value>
  </property>

  <property>
    <name>fs.ftp.contract.test.testdir</name>
    <value>/home/testuser/test</value>
  </property>

  <property>
    <name>fs.ftp.user.ftpserver</name>
    <value>testuser</value>
  </property>

  <property>
    <name>fs.ftp.password.ftpserver</name>
    <value>remember-not-to-check-this-file-in</value>
  </property>
</pre>

</body>
</html>
@ -0,0 +1,116 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.localfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractOptions;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.util.Shell;

import java.io.IOException;

/**
 * The contract of the Local filesystem.
 * This changes its feature set from platform to platform; the default
 * set is updated during initialization.
 *
 * This contract contains some override points, to permit
 * the raw local filesystem and other filesystems to subclass it.
 */
public class LocalFSContract extends AbstractFSContract {

  public static final String CONTRACT_XML = "contract/localfs.xml";
  public static final String SYSPROP_TEST_BUILD_DATA = "test.build.data";
  public static final String DEFAULT_TEST_BUILD_DATA_DIR = "test/build/data";
  private FileSystem fs;

  public LocalFSContract(Configuration conf) {
    super(conf);
    //insert the base features
    addConfResource(getContractXml());
  }

  /**
   * Return the contract file for this filesystem.
   * @return the XML resource path
   */
  protected String getContractXml() {
    return CONTRACT_XML;
  }

  @Override
  public void init() throws IOException {
    super.init();
    fs = getLocalFS();
    adjustContractToLocalEnvironment();
  }

  /**
   * Tweak some of the contract parameters based on the local system
   * state.
   */
  protected void adjustContractToLocalEnvironment() {
    if (Shell.WINDOWS) {
      //NTFS doesn't do case sensitivity, and its permissions are ACL-based
      getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
      getConf().setBoolean(getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
    } else if (ContractTestUtils.isOSX()) {
      //OSX HFS+ is not case sensitive
      getConf().setBoolean(getConfKey(ContractOptions.IS_CASE_SENSITIVE),
          false);
    }
  }

  /**
   * Get the local filesystem. This may be overridden.
   * @return the filesystem
   * @throws IOException IO problems
   */
  protected FileSystem getLocalFS() throws IOException {
    return FileSystem.getLocal(getConf());
  }

  @Override
  public FileSystem getTestFileSystem() throws IOException {
    return fs;
  }

  @Override
  public String getScheme() {
    return "file";
  }

  @Override
  public Path getTestPath() {
    Path path = fs.makeQualified(new Path(
        getTestDataDir()));
    return path;
  }

  /**
   * Get the test data directory.
   * @return the directory for test data
   */
  protected String getTestDataDir() {
    return System.getProperty(SYSPROP_TEST_BUILD_DATA, DEFAULT_TEST_BUILD_DATA_DIR);
  }
}
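
A brief, hypothetical sketch (not part of this patch) of how a test can consume
the options this contract adjusts per platform; "contract" stands in for an
initialized LocalFSContract instance:

  // Hypothetical sketch: querying a platform-adjusted contract option.
  public boolean caseSensitivityExpected(LocalFSContract contract) {
    // On Windows (NTFS) and OS X (HFS+) the contract reports a
    // case-insensitive filesystem, so dependent assertions can be relaxed.
    return contract.isSupported(ContractOptions.IS_CASE_SENSITIVE, true);
  }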
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalFSContractAppend extends AbstractContractAppendTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalFSContractCreate extends AbstractContractCreateTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalFSContractDelete extends AbstractContractDeleteTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,53 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.net.URL;
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Just here to make sure that the localfs.xml contract resource is actually loading.
|
||||||
|
*/
|
||||||
|
public class TestLocalFSContractLoaded extends AbstractFSContractTestBase {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testContractWorks() throws Throwable {
|
||||||
|
String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME);
|
||||||
|
assertNotNull("not set: " + key, getContract().getConf().get(key));
|
||||||
|
assertTrue("not true: " + key,
|
||||||
|
getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testContractResourceOnClasspath() throws Throwable {
|
||||||
|
URL url = this.getClass()
|
||||||
|
.getClassLoader()
|
||||||
|
.getResource(LocalFSContract.CONTRACT_XML);
|
||||||
|
assertNotNull("could not find contract resource", url);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,34 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Test dir operations on the local FS.
|
||||||
|
*/
|
||||||
|
public class TestLocalFSContractMkdir extends AbstractContractMkdirTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalFSContractOpen extends AbstractContractOpenTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalFSContractRename extends AbstractContractRenameTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.localfs;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestLocalFSContractSeek extends AbstractContractSeekTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new LocalFSContract(conf);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,52 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.rawlocal;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
|
import org.apache.hadoop.fs.contract.localfs.LocalFSContract;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Raw local filesystem. This is the inner OS-layer FS
|
||||||
|
* before checksumming is added around it.
|
||||||
|
*/
|
||||||
|
public class RawlocalFSContract extends LocalFSContract {
|
||||||
|
public RawlocalFSContract(Configuration conf) {
|
||||||
|
super(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static final String RAW_CONTRACT_XML = "contract/localfs.xml";
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected String getContractXml() {
|
||||||
|
return RAW_CONTRACT_XML;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected FileSystem getLocalFS() throws IOException {
|
||||||
|
return FileSystem.getLocal(getConf()).getRawFileSystem();
|
||||||
|
}
|
||||||
|
|
||||||
|
public File getTestDirectory() {
|
||||||
|
return new File(getTestDataDir());
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,49 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.rawlocal;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.ContractTestUtils;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
|
||||||
|
public class TestRawLocalContractUnderlyingFileBehavior extends Assert {
|
||||||
|
|
||||||
|
private static File testDirectory;
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void before() {
|
||||||
|
RawlocalFSContract contract =
|
||||||
|
new RawlocalFSContract(new Configuration());
|
||||||
|
testDirectory = contract.getTestDirectory();
|
||||||
|
testDirectory.mkdirs();
|
||||||
|
assertTrue(testDirectory.isDirectory());
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDeleteEmptyPath() throws Throwable {
|
||||||
|
File nonexistent = new File(testDirectory, "testDeleteEmptyPath");
|
||||||
|
assertFalse(nonexistent.exists());
|
||||||
|
assertFalse("nonexistent.delete() returned true", nonexistent.delete());
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.rawlocal;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestRawlocalContractAppend extends AbstractContractAppendTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new RawlocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.rawlocal;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestRawlocalContractCreate extends AbstractContractCreateTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new RawlocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.rawlocal;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
public class TestRawlocalContractDelete extends AbstractContractDeleteTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new RawlocalFSContract(conf);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,34 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.hadoop.fs.contract.rawlocal;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
|
||||||
|
import org.apache.hadoop.fs.contract.AbstractFSContract;
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Test dir operations on the raw local FS.
|
||||||
|
*/
|
||||||
|
public class TestRawlocalContractMkdir extends AbstractContractMkdirTest {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected AbstractFSContract createContract(Configuration conf) {
|
||||||
|
return new RawlocalFSContract(conf);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.rawlocal;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestRawlocalContractOpen extends AbstractContractOpenTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new RawlocalFSContract(conf);
  }
}

@ -0,0 +1,32 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.rawlocal;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestRawlocalContractRename extends AbstractContractRenameTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new RawlocalFSContract(conf);
  }

}

@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.rawlocal;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestRawlocalContractSeek extends AbstractContractSeekTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new RawlocalFSContract(conf);
  }
}

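Each of the TestRawlocalContract* classes above follows the same pattern: extend the shared abstract contract test for one operation (delete, mkdir, open, rename, seek) and bind it to the filesystem under test by returning that filesystem's contract from createContract(). As a rough sketch of how another filesystem would opt in to the same delete tests (the package, test class, and MyFSContract names below are hypothetical, not part of this patch):

    package org.apache.hadoop.fs.contract.myfs;   // hypothetical package

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
    import org.apache.hadoop.fs.contract.AbstractFSContract;

    // Hypothetical binding: run the shared delete test cases against "MyFS".
    public class TestMyFSContractDelete extends AbstractContractDeleteTest {

      @Override
      protected AbstractFSContract createContract(Configuration conf) {
        // MyFSContract is assumed to exist and describe MyFS's semantics.
        return new MyFSContract(conf);
      }
    }
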
@ -0,0 +1,43 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;

/**
 * The contract of S3N: only enabled if the test bucket is provided.
 */
public class NativeS3Contract extends AbstractBondedFSContract {

  public static final String CONTRACT_XML = "contract/s3n.xml";


  public NativeS3Contract(Configuration conf) {
    super(conf);
    // insert the base features
    addConfResource(CONTRACT_XML);
  }

  @Override
  public String getScheme() {
    return "s3n";
  }

}

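NativeS3Contract extends AbstractBondedFSContract, so the S3N contract tests are only expected to run when a test filesystem is bound to the contract through configuration; without a test bucket they stay disabled. A minimal sketch of supplying that binding programmatically, assuming the bonded contract looks the target filesystem up under a property of the form fs.contract.test.fs.<scheme> (both the property key and the bucket URI below are illustrative assumptions, not taken from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.contract.s3n.NativeS3Contract;

    public class S3NContractBindingSketch {
      public static NativeS3Contract boundContract() {
        Configuration conf = new Configuration();
        // Assumed key pattern fs.contract.test.fs.<scheme>; placeholder bucket.
        conf.set("fs.contract.test.fs.s3n", "s3n://example-test-bucket/");
        return new NativeS3Contract(conf);
      }
    }
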
@ -0,0 +1,38 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.ContractTestUtils;

public class TestS3NContractCreate extends AbstractContractCreateTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }

  @Override
  public void testOverwriteEmptyDirectory() throws Throwable {
    ContractTestUtils.skip(
        "blobstores can't distinguish empty directories from files");
  }
}

@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestS3NContractDelete extends AbstractContractDeleteTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }
}

@ -0,0 +1,34 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

/**
 * Test dir operations on S3.
 */
public class TestS3NContractMkdir extends AbstractContractMkdirTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }
}

@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestS3NContractOpen extends AbstractContractOpenTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }
}

@ -0,0 +1,32 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestS3NContractRename extends AbstractContractRenameTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }

}

@ -0,0 +1,35 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

/**
 * Root dir operations against an S3 bucket.
 */
public class TestS3NContractRootDir extends
    AbstractContractRootDirectoryTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }
}

@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.contract.s3n;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestS3NContractSeek extends AbstractContractSeekTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    return new NativeS3Contract(conf);
  }
}

@ -0,0 +1,84 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements. See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership. The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License. You may obtain a copy of the License at
  ~
  ~ http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<configuration>
  <!--
    FTP: these options are for testing against a remote Unix filesystem.
  -->

  <property>
    <name>fs.contract.test.root-tests-enabled</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.is-blobstore</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.is-case-sensitive</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-append</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-directory-delete</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-rename</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-block-locality</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-concat</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-seek</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.rejects-seek-past-eof</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-strict-exceptions</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-unix-permissions</name>
    <value>false</value>
  </property>

</configuration>

@ -0,0 +1,110 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements. See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership. The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License. You may obtain a copy of the License at
  ~
  ~ http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<configuration>

  <!--
    Case sensitivity and permission options are determined at run time from the OS type.
  -->

  <property>
    <name>fs.contract.is-case-sensitive</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-unix-permissions</name>
    <value>true</value>
  </property>

  <!--
    The remaining options are static.
  -->

  <property>
    <name>fs.contract.test.root-tests-enabled</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.test.random-seek-count</name>
    <value>1000</value>
  </property>

  <property>
    <name>fs.contract.rename-creates-dest-dirs</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.rename-overwrites-dest</name>
    <value>true</value>
  </property>

  <!--
    checksummed filesystems do not support append; see HADOOP-4292
  -->
  <property>
    <name>fs.contract.supports-append</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-directory-delete</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-rename</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-block-locality</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-concat</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-seek</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-seek-on-closed-file</name>
    <value>true</value>
  </property>

  <!-- checksum FS doesn't allow seeking past EOF -->
  <property>
    <name>fs.contract.rejects-seek-past-eof</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-strict-exceptions</name>
    <value>false</value>
  </property>

</configuration>

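The comment above notes that case sensitivity and permission options are determined at run time from the OS type rather than being fixed by the XML defaults. A plausible sketch of that kind of override, using only the standard Configuration API (the helper class and the Windows check below are illustrative, not the code this patch uses):

    import org.apache.hadoop.conf.Configuration;

    public class LocalFSContractOverrides {
      // Adjust OS-dependent contract options after loading the XML defaults.
      public static void applyOsOverrides(Configuration conf) {
        boolean windows =
            System.getProperty("os.name").toLowerCase().startsWith("windows");
        // Windows local filesystems are case-insensitive and lack Unix permissions.
        conf.setBoolean("fs.contract.is-case-sensitive", !windows);
        conf.setBoolean("fs.contract.supports-unix-permissions", !windows);
      }
    }
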
@ -0,0 +1,101 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements. See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership. The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License. You may obtain a copy of the License at
  ~
  ~ http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<configuration>
  <!--
    Here are most of the local FS contract options. Some of them may be
    overridden at run time based on the OS; others are potentially generated.
  -->

  <property>
    <name>fs.contract.test.root-tests-enabled</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.test.random-seek-count</name>
    <value>1000</value>
  </property>

  <property>
    <name>fs.contract.is-case-sensitive</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-append</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-directory-delete</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-rename</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-block-locality</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-concat</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.rename-creates-dest-dirs</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.rename-overwrites-dest</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-seek</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-seek-on-closed-file</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.rejects-seek-past-eof</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-strict-exceptions</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-unix-permissions</name>
    <value>true</value>
  </property>

</configuration>

@ -0,0 +1,95 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements. See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership. The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License. You may obtain a copy of the License at
  ~
  ~ http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<configuration>
  <!--
    S3N is a blobstore, with very different behavior from a
    classic filesystem.
  -->

  <property>
    <name>fs.contract.test.root-tests-enabled</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.test.random-seek-count</name>
    <value>10</value>
  </property>

  <property>
    <name>fs.contract.is-blobstore</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.is-case-sensitive</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.rename-returns-false-if-source-missing</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-append</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-directory-delete</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-atomic-rename</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-block-locality</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-concat</name>
    <value>false</value>
  </property>

  <property>
    <name>fs.contract.supports-seek</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.rejects-seek-past-eof</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-strict-exceptions</name>
    <value>true</value>
  </property>

  <property>
    <name>fs.contract.supports-unix-permissions</name>
    <value>false</value>
  </property>

</configuration>

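These contract definitions are ordinary Hadoop configuration resources; NativeS3Contract pulls its options in through addConfResource("contract/s3n.xml"), which by its content appears to be the file above. A short sketch of reading an option back directly with the Configuration API (loading the resource by hand like this is only for illustration; the contract classes add it themselves):

    import org.apache.hadoop.conf.Configuration;

    public class S3NContractOptionsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);  // skip default resources
        conf.addResource("contract/s3n.xml");           // contract resource from the classpath
        // Per the XML these resolve to true and false respectively.
        System.out.println(conf.getBoolean("fs.contract.is-blobstore", false));
        System.out.println(conf.getBoolean("fs.contract.supports-append", true));
      }
    }
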