diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 6c5f9384267..62681caa466 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -274,7 +274,7 @@ public class RawLocalFileSystem extends FileSystem {
Progressable progress) throws IOException {
FileStatus status = getFileStatus(f);
if (status.isDirectory()) {
- throw new IOException("Cannot append to a diretory (=" + f + " )");
+ throw new FileAlreadyExistsException("Cannot append to a directory: " + f);
}
return new FSDataOutputStream(new BufferedOutputStream(
createOutputStreamWithMode(f, true, null), bufferSize), statistics,
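
This change sharpens the failure mode of append: appending to a directory now raises FileAlreadyExistsException (an IOException subclass), matching HDFS, instead of a bare IOException. A minimal caller-side sketch of the new contract, assuming an illustrative /tmp path; RawLocalFileSystem is used directly because the checksummed LocalFileSystem does not support append:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileAlreadyExistsException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RawLocalFileSystem;

    public class AppendToDirDemo {
      public static void main(String[] args) throws IOException {
        RawLocalFileSystem fs = new RawLocalFileSystem();
        fs.initialize(URI.create("file:///"), new Configuration());
        Path dir = new Path("/tmp/append-demo");   // illustrative path
        fs.mkdirs(dir);
        try {
          fs.append(dir).close();
        } catch (FileAlreadyExistsException expected) {
          // the sharpened exception introduced by this patch
          System.out.println("append rejected: " + expected.getMessage());
        }
      }
    }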
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 20e9b5a21fa..67c2d5bc631 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1527,16 +1527,31 @@
     SAS keys to communicate with Azure storage.
   </description>
 </property>
 
 <property>
   <name>fs.abfs.impl</name>
   <value>org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem</value>
   <description>The implementation class of the Azure Blob Filesystem</description>
 </property>
 
 <property>
   <name>fs.abfss.impl</name>
   <value>org.apache.hadoop.fs.azurebfs.SecureAzureBlobFileSystem</value>
   <description>The implementation class of the Secure Azure Blob Filesystem</description>
 </property>
 
+<property>
+  <name>fs.AbstractFileSystem.abfs.impl</name>
+  <value>org.apache.hadoop.fs.azurebfs.Abfs</value>
+  <description>AbstractFileSystem implementation class of abfs://</description>
+</property>
+
+<property>
+  <name>fs.AbstractFileSystem.abfss.impl</name>
+  <value>org.apache.hadoop.fs.azurebfs.Abfss</value>
+  <description>AbstractFileSystem implementation class of abfss://</description>
+</property>
+
 <property>
   <name>fs.azure.local.sas.key.mode</name>
   <value>false</value>
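
These AbstractFileSystem bindings are what FileContext (as opposed to FileSystem) uses to resolve a scheme: without fs.AbstractFileSystem.abfs.impl, FileContext operations against abfs:// URIs fail even when fs.abfs.impl is set. A sketch of the lookup path, with a placeholder account URI and the credentials configuration omitted:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class FileContextAbfsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // loads core-default.xml
        // FileContext resolves fs.AbstractFileSystem.abfs.impl -> Abfs,
        // which delegates to AzureBlobFileSystem (placeholder URI below).
        FileContext fc = FileContext.getFileContext(
            URI.create("abfs://container@account.dfs.core.windows.net/"), conf);
        FileStatus st = fc.getFileStatus(new Path("/"));
        System.out.println(st);
      }
    }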
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index fdac610afbb..8897019af82 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -537,15 +537,6 @@ atomic. The combined operation, including `mkdirs(parent(F))` MAY be atomic.
The return value is always true—even if a new directory is not created
(this is defined in HDFS).
-#### Implementation Notes: Local FileSystem
-
-The local FileSystem does not raise an exception if `mkdirs(p)` is invoked
-on a path that exists and is a file. Instead the operation returns false.
-
-    if isFile(FS, p):
-        FS' = FS
-        result = False
-
### `FSDataOutputStream create(Path, ...)`
@@ -617,7 +608,7 @@ Implementations MAY throw `UnsupportedOperationException`.
if not exists(FS, p) : raise FileNotFoundException
- if not isFile(FS, p) : raise [FileNotFoundException, IOException]
+ if not isFile(FS, p) : raise [FileAlreadyExistsException, FileNotFoundException, IOException]
#### Postconditions
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index d61b6354498..0be220e6511 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -18,7 +18,12 @@
package org.apache.hadoop.fs.contract;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.slf4j.Logger;
@@ -27,6 +32,7 @@ import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Test append -if supported
@@ -75,15 +81,10 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
@Test
public void testAppendNonexistentFile() throws Throwable {
- try {
- FSDataOutputStream out = getFileSystem().append(target);
- //got here: trouble
- out.close();
- fail("expected a failure");
- } catch (Exception e) {
- //expected
- handleExpectedException(e);
- }
+ //expected
+ handleExpectedException(
+ intercept(Exception.class,
+ () -> getFileSystem().append(target).close()));
}
@Test
@@ -116,15 +117,9 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
@Test
public void testAppendMissingTarget() throws Throwable {
- try {
- FSDataOutputStream out = getFileSystem().append(target);
- //got here: trouble
- out.close();
- fail("expected a failure");
- } catch (Exception e) {
- //expected
- handleExpectedException(e);
- }
+ handleExpectedException(
+ intercept(Exception.class,
+ () -> getFileSystem().append(target).close()));
}
@Test
@@ -149,4 +144,30 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
dataset.length);
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
}
+
+ @Test
+ public void testAppendFileAfterDelete() throws Exception {
+ final FileSystem fs = getFileSystem();
+ final Path filePath = target;
+ fs.create(filePath);
+ fs.delete(filePath, false);
+ intercept(FileNotFoundException.class,
+ () -> fs.append(filePath));
+ }
+
+ @Test
+ public void testAppendDirectory() throws Exception {
+ final FileSystem fs = getFileSystem();
+
+ final Path folderPath = target;
+ fs.mkdirs(folderPath);
+ IOException ex = intercept(IOException.class,
+ () -> fs.append(folderPath));
+ if (ex instanceof FileAlreadyExistsException) {
+ handleExpectedException(ex);
+ } else {
+ handleRelaxedException("Append to a directory",
+ "FileAlreadyExistsException", ex);
+ }
+ }
}
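
The rewrites above lean on LambdaTestUtils.intercept, which evaluates a callable, fails the test if the call returns, and hands back the caught exception so it can be fed to handleExpectedException or asserted on further. Its contract is roughly the following simplified sketch (the real implementation adds message matching and many more overloads):

    import java.util.concurrent.Callable;

    public final class InterceptSketch {
      public static <E extends Throwable> E intercept(
          Class<E> clazz, Callable<?> eval) throws Exception {
        Object result;
        try {
          result = eval.call();
        } catch (Throwable caught) {
          if (clazz.isInstance(caught)) {
            return clazz.cast(caught);   // expected failure: return it
          }
          throw new AssertionError("Wrong exception type: " + caught, caught);
        }
        throw new AssertionError(
            "Expected " + clazz.getName() + " but the call returned: " + result);
      }
    }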
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
index 7b120861edc..d30e0d66eff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
@@ -19,15 +19,16 @@
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.Path;
+
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.contract.ContractTestUtils.assertFileHasLength;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Test concat -if supported
@@ -60,25 +61,15 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB
@Test
public void testConcatEmptyFiles() throws Throwable {
touch(getFileSystem(), target);
- try {
- getFileSystem().concat(target, new Path[0]);
- fail("expected a failure");
- } catch (Exception e) {
- //expected
- handleExpectedException(e);
- }
+ handleExpectedException(intercept(Exception.class,
+ () -> getFileSystem().concat(target, new Path[0])));
}
@Test
public void testConcatMissingTarget() throws Throwable {
- try {
- getFileSystem().concat(target,
- new Path[] { zeroByteFile});
- fail("expected a failure");
- } catch (Exception e) {
- //expected
- handleExpectedException(e);
- }
+ handleExpectedException(
+ intercept(Exception.class,
+ () -> getFileSystem().concat(target, new Path[]{zeroByteFile})));
}
@Test
@@ -98,15 +89,8 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB
public void testConcatOnSelf() throws Throwable {
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
createFile(getFileSystem(), target, false, block);
- try {
- getFileSystem().concat(target,
- new Path[]{target});
- } catch (Exception e) {
- //expected
- handleExpectedException(e);
- }
+ handleExpectedException(intercept(Exception.class,
+ () -> getFileSystem().concat(target, new Path[]{target})));
}
-
-
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 269e35ea669..cb706ede917 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.junit.Test;
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Test getFileStatus and related listing operations.
@@ -275,35 +276,22 @@ public abstract class AbstractContractGetFileStatusTest extends
@Test
public void testLocatedStatusNoDir() throws Throwable {
describe("test the LocatedStatus call on a path which is not present");
- try {
-      RemoteIterator<LocatedFileStatus> iterator
- = getFileSystem().listLocatedStatus(path("missing"));
- fail("Expected an exception, got an iterator: " + iterator);
- } catch (FileNotFoundException expected) {
- // expected
- }
+ intercept(FileNotFoundException.class,
+ () -> getFileSystem().listLocatedStatus(path("missing")));
}
@Test
public void testListStatusNoDir() throws Throwable {
describe("test the listStatus(path) call on a path which is not present");
- try {
- getFileSystem().listStatus(path("missing"));
- fail("Expected an exception");
- } catch (FileNotFoundException expected) {
- // expected
- }
+ intercept(FileNotFoundException.class,
+ () -> getFileSystem().listStatus(path("missing")));
}
@Test
public void testListStatusFilteredNoDir() throws Throwable {
describe("test the listStatus(path, filter) call on a missing path");
- try {
- getFileSystem().listStatus(path("missing"), ALL_PATHS);
- fail("Expected an exception");
- } catch (FileNotFoundException expected) {
- // expected
- }
+ intercept(FileNotFoundException.class,
+ () -> getFileSystem().listStatus(path("missing"), ALL_PATHS));
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
index c5a546dccdd..de44bc232e7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
@@ -26,6 +26,7 @@ import org.junit.Test;
import java.io.IOException;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -175,4 +176,11 @@ public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBa
}
}
+ @Test
+ public void testCreateDirWithExistingDir() throws Exception {
+ Path path = path("testCreateDirWithExistingDir");
+ final FileSystem fs = getFileSystem();
+ assertMkdirs(fs, path);
+ assertMkdirs(fs, path);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
index d3dafe974a5..f09496a6082 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContract.java
@@ -148,7 +148,6 @@ public abstract class AbstractFSContract extends Configured {
* @param feature feature to query
* @param defval default value
* @return true if the feature is supported
- * @throws IOException IO problems
*/
public boolean isSupported(String feature, boolean defval) {
return getConf().getBoolean(getConfKey(feature), defval);
@@ -160,7 +159,6 @@ public abstract class AbstractFSContract extends Configured {
* @param feature feature to query
* @param defval default value
* @return true if the feature is supported
- * @throws IOException IO problems
*/
public int getLimit(String feature, int defval) {
return getConf().getInt(getConfKey(feature), defval);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index d113bf81607..908b7f3a0e6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -186,8 +186,11 @@ public class ContractTestUtils extends Assert {
(short) 1,
buffersize);
}
- out.write(src, 0, len);
- out.close();
+ try {
+ out.write(src, 0, len);
+ } finally {
+ out.close();
+ }
assertFileHasLength(fs, path, len);
}
@@ -888,6 +891,18 @@ public class ContractTestUtils extends Assert {
found);
}
+ /**
+ * Execute {@link FileSystem#mkdirs(Path)}; expect {@code true} back.
+ * (Note: does not work for localFS if the directory already exists)
+ * Does not perform any validation of the created directory.
+ * @param fs filesystem
+ * @param dir directory to create
+ * @throws IOException IO Problem
+ */
+ public static void assertMkdirs(FileSystem fs, Path dir) throws IOException {
+ assertTrue("mkdirs(" + dir + ") returned false", fs.mkdirs(dir));
+ }
+
/**
* Test for the host being an OSX machine.
* @return true if the JVM thinks that is running on OSX
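
The write/close change above guards against a stream being left open when write() fails part way. The same structure could be written with try-with-resources, shown here as a sketch on a hypothetical helper (the patch keeps explicit try/finally, which behaves identically):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;

    final class WriteHelper {
      static void writeAndClose(FSDataOutputStream out, byte[] src, int len)
          throws IOException {
        try (FSDataOutputStream os = out) { // close() runs even if write() throws
          os.write(src, 0, len);
        }
      }
    }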
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfs.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfs.java
index 707e264a365..32df9422386 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfs.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfs.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
@@ -32,7 +31,6 @@ import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
* Azure Blob File System implementation of AbstractFileSystem.
* This impl delegates to the old FileSystem
*/
-@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Abfs extends DelegateToFileSystem {
@@ -45,4 +43,4 @@ public class Abfs extends DelegateToFileSystem {
public int getUriDefaultPort() {
return -1;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfss.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfss.java
index 19c0f7a7d62..c33265ce324 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfss.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/Abfss.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
@@ -32,7 +31,6 @@ import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
* Azure Blob File System implementation of AbstractFileSystem.
* This impl delegates to the old FileSystem
*/
-@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Abfss extends DelegateToFileSystem {
@@ -45,4 +43,4 @@ public class Abfss extends DelegateToFileSystem {
public int getUriDefaultPort() {
return -1;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
index cf5acbb0fd3..9f58f6b040a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
@@ -36,14 +36,10 @@ import java.util.concurrent.Future;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
import org.apache.commons.lang.ArrayUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -54,13 +50,15 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations;
import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
-import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.FileSystemOperationUnhandledException;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriAuthorityException;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriException;
+import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
@@ -69,8 +67,7 @@ import org.apache.hadoop.util.Progressable;
* A {@link org.apache.hadoop.fs.FileSystem} for reading and writing files stored on Windows Azure
*/
-@InterfaceAudience.Public
-@InterfaceStability.Stable
+@InterfaceStability.Evolving
public class AzureBlobFileSystem extends FileSystem {
public static final Logger LOG = LoggerFactory.getLogger(AzureBlobFileSystem.class);
private URI uri;
@@ -88,8 +85,7 @@ public class AzureBlobFileSystem extends FileSystem {
super.initialize(uri, configuration);
setConf(configuration);
- this.LOG.debug(
- "Initializing AzureBlobFileSystem for {}", uri);
+ LOG.debug("Initializing AzureBlobFileSystem for {}", uri);
this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
this.userGroupInformation = UserGroupInformation.getCurrentUser();
@@ -97,16 +93,24 @@ public class AzureBlobFileSystem extends FileSystem {
this.primaryUserGroup = userGroupInformation.getPrimaryGroupName();
this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecure(), configuration, userGroupInformation);
- this.LOG.debug(
- "Initializing NativeAzureFileSystem for {}", uri);
+ LOG.debug("Initializing NativeAzureFileSystem for {}", uri);
this.setWorkingDirectory(this.getHomeDirectory());
if (abfsStore.getAbfsConfiguration().getCreateRemoteFileSystemDuringInitialization()) {
this.createFileSystem();
}
+ }
- this.mkdirs(this.workingDir);
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder(
+ "AzureBlobFileSystem{");
+ sb.append("uri=").append(uri);
+ sb.append(", user='").append(user).append('\'');
+ sb.append(", primaryUserGroup='").append(primaryUserGroup).append('\'');
+ sb.append('}');
+ return sb.toString();
}
public boolean isSecure() {
@@ -120,8 +124,7 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public FSDataInputStream open(final Path path, final int bufferSize) throws IOException {
- this.LOG.debug(
- "AzureBlobFileSystem.open path: {} bufferSize: {}", path.toString(), bufferSize);
+ LOG.debug("AzureBlobFileSystem.open path: {} bufferSize: {}", path, bufferSize);
try {
InputStream inputStream = abfsStore.openFileForRead(makeQualified(path), statistics);
@@ -135,9 +138,8 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize,
final short replication, final long blockSize, final Progressable progress) throws IOException {
- this.LOG.debug(
- "AzureBlobFileSystem.create path: {} permission: {} overwrite: {} bufferSize: {}",
- f.toString(),
+ LOG.debug("AzureBlobFileSystem.create path: {} permission: {} overwrite: {} bufferSize: {}",
+ f,
permission,
overwrite,
blockSize);
@@ -196,7 +198,7 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException {
- this.LOG.debug(
+ LOG.debug(
"AzureBlobFileSystem.append path: {} bufferSize: {}",
f.toString(),
bufferSize);
@@ -211,7 +213,7 @@ public class AzureBlobFileSystem extends FileSystem {
}
public boolean rename(final Path src, final Path dst) throws IOException {
- this.LOG.debug(
+ LOG.debug(
"AzureBlobFileSystem.rename src: {} dst: {}", src.toString(), dst.toString());
Path parentFolder = src.getParent();
@@ -250,7 +252,7 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public boolean delete(final Path f, final boolean recursive) throws IOException {
- this.LOG.debug(
+ LOG.debug(
"AzureBlobFileSystem.delete path: {} recursive: {}", f.toString(), recursive);
if (f.isRoot()) {
@@ -273,7 +275,7 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
- this.LOG.debug(
+ LOG.debug(
"AzureBlobFileSystem.listStatus path: {}", f.toString());
try {
@@ -287,8 +289,8 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public boolean mkdirs(final Path f, final FsPermission permission) throws IOException {
- this.LOG.debug(
- "AzureBlobFileSystem.mkdirs path: {} permissions: {}", f.toString(), permission);
+ LOG.debug(
+ "AzureBlobFileSystem.mkdirs path: {} permissions: {}", f, permission);
final Path parentFolder = f.getParent();
if (parentFolder == null) {
@@ -312,13 +314,13 @@ public class AzureBlobFileSystem extends FileSystem {
}
super.close();
- this.LOG.debug("AzureBlobFileSystem.close");
+ LOG.debug("AzureBlobFileSystem.close");
this.isClosed = true;
}
@Override
public FileStatus getFileStatus(final Path f) throws IOException {
- this.LOG.debug("AzureBlobFileSystem.getFileStatus path: {}", f.toString());
+ LOG.debug("AzureBlobFileSystem.getFileStatus path: {}", f);
try {
return abfsStore.getFileStatus(makeQualified(f));
@@ -350,7 +352,8 @@ public class AzureBlobFileSystem extends FileSystem {
@Override
public Path getHomeDirectory() {
return makeQualified(new Path(
- FileSystemConfigurations.USER_HOME_DIRECTORY_PREFIX + "/" + this.userGroupInformation.getShortUserName()));
+ FileSystemConfigurations.USER_HOME_DIRECTORY_PREFIX
+ + "/" + this.userGroupInformation.getShortUserName()));
}
/**
@@ -360,7 +363,7 @@ public class AzureBlobFileSystem extends FileSystem {
*/
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file,
- long start, long len) throws IOException {
+ long start, long len) {
if (file == null) {
return null;
}
@@ -403,7 +406,7 @@ public class AzureBlobFileSystem extends FileSystem {
}
private boolean deleteRoot() throws IOException {
- this.LOG.debug("Deleting root content");
+ LOG.debug("Deleting root content");
final ExecutorService executorService = Executors.newFixedThreadPool(10);
@@ -441,15 +444,14 @@ public class AzureBlobFileSystem extends FileSystem {
private FileStatus tryGetFileStatus(final Path f) {
try {
return getFileStatus(f);
- }
- catch (IOException ex) {
- this.LOG.debug("File not found {}", f.toString());
+ } catch (IOException ex) {
+ LOG.debug("File not found {}", f);
return null;
}
}
private void createFileSystem() throws IOException {
- this.LOG.debug(
+ LOG.debug(
"AzureBlobFileSystem.createFileSystem uri: {}", uri);
try {
this.abfsStore.createFilesystem();
@@ -493,7 +495,8 @@ public class AzureBlobFileSystem extends FileSystem {
return false;
}
- if (scheme.equals(FileSystemUriSchemes.ABFS_SCHEME) || scheme.equals(FileSystemUriSchemes.ABFS_SECURE_SCHEME)) {
+ if (scheme.equals(FileSystemUriSchemes.ABFS_SCHEME)
+ || scheme.equals(FileSystemUriSchemes.ABFS_SECURE_SCHEME)) {
return true;
}
@@ -501,34 +504,45 @@ public class AzureBlobFileSystem extends FileSystem {
}
@VisibleForTesting
-  <T> FileSystemOperation execute(
+  <T> FileSystemOperation<T> execute(
       final String scopeDescription,
       final Callable<T> callableFileOperation) throws IOException {
return execute(scopeDescription, callableFileOperation, null);
}
@VisibleForTesting
-  <T> FileSystemOperation execute(
+  <T> FileSystemOperation<T> execute(
       final String scopeDescription,
       final Callable<T> callableFileOperation,
T defaultResultValue) throws IOException {
try {
final T executionResult = callableFileOperation.call();
- return new FileSystemOperation(executionResult, null);
+ return new FileSystemOperation<>(executionResult, null);
} catch (AbfsRestOperationException abfsRestOperationException) {
- return new FileSystemOperation(defaultResultValue, abfsRestOperationException);
+ return new FileSystemOperation<>(defaultResultValue, abfsRestOperationException);
} catch (AzureBlobFileSystemException azureBlobFileSystemException) {
throw new IOException(azureBlobFileSystemException);
} catch (Exception exception) {
if (exception instanceof ExecutionException) {
exception = (Exception) getRootCause(exception);
}
- final FileSystemOperationUnhandledException fileSystemOperationUnhandledException = new FileSystemOperationUnhandledException(exception);
+ final FileSystemOperationUnhandledException fileSystemOperationUnhandledException
+ = new FileSystemOperationUnhandledException(exception);
throw new IOException(fileSystemOperationUnhandledException);
}
}
+ /**
+ * Given a path and exception, choose which IOException subclass
+ * to create.
+   * Will return if and only if the error code is in the list of allowed
+ * error codes.
+ * @param path path of operation triggering exception; may be null
+ * @param exception the exception caught
+ * @param allowedErrorCodesList varargs list of error codes.
+ * @throws IOException if the exception error code is not on the allowed list.
+ */
private void checkException(final Path path,
final AzureBlobFileSystemException exception,
final AzureServiceErrorCode... allowedErrorCodesList) throws IOException {
@@ -542,9 +556,11 @@ public class AzureBlobFileSystem extends FileSystem {
//AbfsRestOperationException.getMessage() contains full error info including path/uri.
if (statusCode == HttpURLConnection.HTTP_NOT_FOUND) {
- throw new FileNotFoundException(ere.getMessage());
+ throw (IOException)new FileNotFoundException(ere.getMessage())
+ .initCause(exception);
} else if (statusCode == HttpURLConnection.HTTP_CONFLICT) {
- throw new FileAlreadyExistsException(ere.getMessage());
+ throw (IOException)new FileAlreadyExistsException(ere.getMessage())
+ .initCause(exception);
} else {
throw ere;
}
@@ -601,4 +617,4 @@ public class AzureBlobFileSystem extends FileSystem {
AzureBlobFileSystemStore getAbfsStore() {
return this.abfsStore;
}
-}
\ No newline at end of file
+}
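
The initCause() idiom added to checkException exists because FileNotFoundException and FileAlreadyExistsException only expose (String) constructors; chaining the original AzureBlobFileSystemException keeps the REST failure visible in stack traces. The translation pattern, isolated as a sketch (the translate name and shape are illustrative, not part of this patch):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.net.HttpURLConnection;
    import org.apache.hadoop.fs.FileAlreadyExistsException;

    final class ErrorTranslation {
      static IOException translate(int statusCode, String msg, Exception cause) {
        switch (statusCode) {
        case HttpURLConnection.HTTP_NOT_FOUND:    // 404 -> missing path
          return (IOException) new FileNotFoundException(msg).initCause(cause);
        case HttpURLConnection.HTTP_CONFLICT:     // 409 -> already exists
          return (IOException) new FileAlreadyExistsException(msg).initCause(cause);
        default:
          return new IOException(msg, cause);
        }
      }
    }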
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 134277fd969..8ac31ce0372 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -103,7 +103,7 @@ public class AzureBlobFileSystemStore {
  private final Set<String> azureAtomicRenameDirSet;
- public AzureBlobFileSystemStore(URI uri, boolean isSeure, Configuration configuration, UserGroupInformation userGroupInformation)
+ public AzureBlobFileSystemStore(URI uri, boolean isSecure, Configuration configuration, UserGroupInformation userGroupInformation)
throws AzureBlobFileSystemException {
this.uri = uri;
try {
@@ -113,9 +113,10 @@ public class AzureBlobFileSystemStore {
}
this.userGroupInformation = userGroupInformation;
- this.azureAtomicRenameDirSet = new HashSet<>(Arrays.asList(abfsConfiguration.getAzureAtomicRenameDirs().split(AbfsHttpConstants.COMMA)));
+ this.azureAtomicRenameDirSet = new HashSet<>(Arrays.asList(
+ abfsConfiguration.getAzureAtomicRenameDirs().split(AbfsHttpConstants.COMMA)));
- initializeClient(uri, isSeure);
+ initializeClient(uri, isSecure);
}
@VisibleForTesting
@@ -134,8 +135,7 @@ public class AzureBlobFileSystemStore {
}
  public Hashtable<String, String> getFilesystemProperties() throws AzureBlobFileSystemException {
- this.LOG.debug(
- "getFilesystemProperties for filesystem: {}",
+ LOG.debug("getFilesystemProperties for filesystem: {}",
client.getFileSystem());
    final Hashtable<String, String> parsedXmsProperties;
@@ -148,13 +148,13 @@ public class AzureBlobFileSystemStore {
return parsedXmsProperties;
}
-  public void setFilesystemProperties(final Hashtable<String, String> properties) throws AzureBlobFileSystemException {
-    if (properties == null || properties.size() == 0) {
+  public void setFilesystemProperties(final Hashtable<String, String> properties)
+      throws AzureBlobFileSystemException {
+    if (properties == null || properties.isEmpty()) {
return;
}
- this.LOG.debug(
- "setFilesystemProperties for filesystem: {} with properties: {}",
+ LOG.debug("setFilesystemProperties for filesystem: {} with properties: {}",
client.getFileSystem(),
properties);
@@ -169,10 +169,9 @@ public class AzureBlobFileSystemStore {
}
  public Hashtable<String, String> getPathProperties(final Path path) throws AzureBlobFileSystemException {
- this.LOG.debug(
- "getPathProperties for filesystem: {} path: {}",
+ LOG.debug("getPathProperties for filesystem: {} path: {}",
client.getFileSystem(),
- path.toString());
+ path);
    final Hashtable<String, String> parsedXmsProperties;
final AbfsRestOperation op = client.getPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path));
@@ -185,10 +184,9 @@ public class AzureBlobFileSystemStore {
}
  public void setPathProperties(final Path path, final Hashtable<String, String> properties) throws AzureBlobFileSystemException {
- this.LOG.debug(
- "setFilesystemProperties for filesystem: {} path: {} with properties: {}",
+ LOG.debug("setFilesystemProperties for filesystem: {} path: {} with properties: {}",
client.getFileSystem(),
- path.toString(),
+ path,
properties);
final String commaSeparatedProperties;
@@ -201,26 +199,23 @@ public class AzureBlobFileSystemStore {
}
public void createFilesystem() throws AzureBlobFileSystemException {
- this.LOG.debug(
- "createFilesystem for filesystem: {}",
+ LOG.debug("createFilesystem for filesystem: {}",
client.getFileSystem());
client.createFilesystem();
}
public void deleteFilesystem() throws AzureBlobFileSystemException {
- this.LOG.debug(
- "deleteFilesystem for filesystem: {}",
+ LOG.debug("deleteFilesystem for filesystem: {}",
client.getFileSystem());
client.deleteFilesystem();
}
public OutputStream createFile(final Path path, final boolean overwrite) throws AzureBlobFileSystemException {
- this.LOG.debug(
- "createFile filesystem: {} path: {} overwrite: {}",
+ LOG.debug("createFile filesystem: {} path: {} overwrite: {}",
client.getFileSystem(),
- path.toString(),
+ path,
overwrite);
client.createPath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), true, overwrite);
@@ -232,23 +227,19 @@ public class AzureBlobFileSystemStore {
return outputStream;
}
- public Void createDirectory(final Path path) throws AzureBlobFileSystemException {
- this.LOG.debug(
- "createDirectory filesystem: {} path: {} overwrite: {}",
+ public void createDirectory(final Path path) throws AzureBlobFileSystemException {
+ LOG.debug("createDirectory filesystem: {} path: {}",
client.getFileSystem(),
- path.toString());
+ path);
client.createPath("/" + getRelativePath(path), false, true);
-
- return null;
}
public InputStream openFileForRead(final Path path, final FileSystem.Statistics statistics) throws AzureBlobFileSystemException {
- this.LOG.debug(
- "openFileForRead filesystem: {} path: {}",
+ LOG.debug("openFileForRead filesystem: {} path: {}",
client.getFileSystem(),
- path.toString());
+ path);
final AbfsRestOperation op = client.getPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path));
@@ -266,16 +257,16 @@ public class AzureBlobFileSystemStore {
// Add statistics for InputStream
return new FSDataInputStream(
- new AbfsInputStream(client, statistics, AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), contentLength,
+ new AbfsInputStream(client, statistics,
+ AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), contentLength,
abfsConfiguration.getReadBufferSize(), abfsConfiguration.getReadAheadQueueDepth(), eTag));
}
public OutputStream openFileForWrite(final Path path, final boolean overwrite) throws
AzureBlobFileSystemException {
- this.LOG.debug(
- "openFileForWrite filesystem: {} path: {} overwrite: {}",
+ LOG.debug("openFileForWrite filesystem: {} path: {} overwrite: {}",
client.getFileSystem(),
- path.toString(),
+ path,
overwrite);
final AbfsRestOperation op = client.getPathProperties(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path));
@@ -304,23 +295,21 @@ public class AzureBlobFileSystemStore {
AzureBlobFileSystemException {
if (isAtomicRenameKey(source.getName())) {
- this.LOG.warn("The atomic rename feature is not supported by the ABFS scheme; however rename,"
+ LOG.warn("The atomic rename feature is not supported by the ABFS scheme; however rename,"
+" create and delete operations are atomic if Namespace is enabled for your Azure Storage account.");
}
- this.LOG.debug(
- "renameAsync filesystem: {} source: {} destination: {}",
+ LOG.debug("renameAsync filesystem: {} source: {} destination: {}",
client.getFileSystem(),
- source.toString(),
- destination.toString());
+ source,
+ destination);
String continuation = null;
long deadline = now() + RENAME_TIMEOUT_MILISECONDS;
do {
if (now() > deadline) {
- LOG.debug(
- "Rename {} to {} timed out.",
+ LOG.debug("Rename {} to {} timed out.",
source,
destination);
@@ -334,13 +323,12 @@ public class AzureBlobFileSystemStore {
} while (continuation != null && !continuation.isEmpty());
}
- public void delete(final Path path, final boolean recursive) throws
- AzureBlobFileSystemException {
+ public void delete(final Path path, final boolean recursive)
+ throws AzureBlobFileSystemException {
- this.LOG.debug(
- "delete filesystem: {} path: {} recursive: {}",
+ LOG.debug("delete filesystem: {} path: {} recursive: {}",
client.getFileSystem(),
- path.toString(),
+ path,
String.valueOf(recursive));
String continuation = null;
@@ -348,13 +336,13 @@ public class AzureBlobFileSystemStore {
do {
if (now() > deadline) {
- this.LOG.debug(
- "Delete directory {} timed out.", path);
+ LOG.debug("Delete directory {} timed out.", path);
throw new TimeoutException("Delete directory timed out.");
}
- AbfsRestOperation op = client.deletePath(AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation);
+ AbfsRestOperation op = client.deletePath(
+ AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path), recursive, continuation);
continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION);
} while (continuation != null && !continuation.isEmpty());
@@ -362,10 +350,9 @@ public class AzureBlobFileSystemStore {
public FileStatus getFileStatus(final Path path) throws IOException {
- this.LOG.debug(
- "getFileStatus filesystem: {} path: {}",
+ LOG.debug("getFileStatus filesystem: {} path: {}",
client.getFileSystem(),
- path.toString());
+ path);
if (path.isRoot()) {
AbfsRestOperation op = client.getFilesystemProperties();
@@ -405,10 +392,9 @@ public class AzureBlobFileSystemStore {
}
public FileStatus[] listStatus(final Path path) throws IOException {
- this.LOG.debug(
- "listStatus filesystem: {} path: {}",
+ LOG.debug("listStatus filesystem: {} path: {}",
client.getFileSystem(),
- path.toString());
+ path);
String relativePath = path.isRoot() ? AbfsHttpConstants.EMPTY_STRING : getRelativePath(path);
String continuation = null;
@@ -480,10 +466,12 @@ public class AzureBlobFileSystemStore {
final String[] authorityParts = authority.split(AbfsHttpConstants.AZURE_DISTRIBUTED_FILE_SYSTEM_AUTHORITY_DELIMITER, 2);
- if (authorityParts.length < 2 || "".equals(authorityParts[0])) {
+ if (authorityParts.length < 2 || authorityParts[0] != null
+ && authorityParts[0].isEmpty()) {
final String errMsg = String
- .format("URI '%s' has a malformed authority, expected container name. "
- + "Authority takes the form "+ FileSystemUriSchemes.ABFS_SCHEME + "://[@]",
+ .format("'%s' has a malformed authority, expected container name. "
+ + "Authority takes the form "
+ + FileSystemUriSchemes.ABFS_SCHEME + "://[@]",
uri.toString());
throw new InvalidUriException(errMsg);
}
@@ -499,11 +487,16 @@ public class AzureBlobFileSystemStore {
try {
baseUrl = new URL(url);
} catch (MalformedURLException e) {
- throw new InvalidUriException(String.format("URI '%s' is malformed", uri.toString()));
+ throw new InvalidUriException(uri.toString());
}
+ int dotIndex = accountName.indexOf(AbfsHttpConstants.DOT);
+ if (dotIndex <= 0) {
+ throw new InvalidUriException(
+ uri.toString() + " - account name is not fully qualified.");
+ }
SharedKeyCredentials creds =
- new SharedKeyCredentials(accountName.substring(0, accountName.indexOf(AbfsHttpConstants.DOT)),
+ new SharedKeyCredentials(accountName.substring(0, dotIndex),
this.abfsConfiguration.getStorageAccountKey(accountName));
this.client = new AbfsClient(baseUrl, creds, abfsConfiguration, new ExponentialRetryPolicy());
@@ -513,7 +506,7 @@ public class AzureBlobFileSystemStore {
Preconditions.checkNotNull(path, "path");
final String relativePath = path.toUri().getPath();
- if (relativePath.length() == 0) {
+ if (relativePath.isEmpty()) {
return relativePath;
}
@@ -537,7 +530,8 @@ public class AzureBlobFileSystemStore {
}
private boolean parseIsDirectory(final String resourceType) {
- return resourceType == null ? false : resourceType.equalsIgnoreCase(AbfsHttpConstants.DIRECTORY);
+ return resourceType != null
+ && resourceType.equalsIgnoreCase(AbfsHttpConstants.DIRECTORY);
}
private DateTime parseLastModifiedTime(final String lastModifiedTime) {
@@ -628,7 +622,7 @@ public class AzureBlobFileSystemStore {
}
}
} catch (URISyntaxException e) {
- this.LOG.info("URI syntax error creating URI for {}", dir);
+ LOG.info("URI syntax error creating URI for {}", dir);
}
}
@@ -658,20 +652,21 @@ public class AzureBlobFileSystemStore {
*/
@Override
public boolean equals(Object obj) {
- if (obj == this) {
- return true;
- }
-
- if (obj == null) {
+ if (!(obj instanceof FileStatus)) {
return false;
}
- if (this.getClass() == obj.getClass()) {
- VersionedFileStatus other = (VersionedFileStatus) obj;
- return this.getPath().equals(other.getPath()) && this.version.equals(other.version);
+ FileStatus other = (FileStatus) obj;
+
+      if (!super.equals(obj)) { // compare the path; avoids dispatching back into this method
+ return false;
}
- return false;
+ if (other instanceof VersionedFileStatus) {
+ return this.version.equals(((VersionedFileStatus)other).version);
+ }
+
+ return true;
}
/**
@@ -695,6 +690,16 @@ public class AzureBlobFileSystemStore {
public String getVersion() {
return this.version;
}
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder(
+ "VersionedFileStatus{");
+ sb.append(super.toString());
+ sb.append("; version='").append(version).append('\'');
+ sb.append('}');
+ return sb.toString();
+ }
}
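
The new dotIndex guard in initializeClient requires the account name to be fully qualified, because SharedKeyCredentials takes only the short account name ahead of the first dot. A sketch of the split it protects, with an illustrative host name:

    final class AccountNameSplit {
      public static void main(String[] args) {
        String accountName = "myaccount.dfs.core.windows.net"; // illustrative
        int dotIndex = accountName.indexOf('.');
        if (dotIndex <= 0) {
          throw new IllegalArgumentException(
              accountName + " - account name is not fully qualified.");
        }
        System.out.println(accountName.substring(0, dotIndex)); // "myaccount"
      }
    }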
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java
index a6ad8299231..15fe5427252 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/SecureAzureBlobFileSystem.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.fs.azurebfs;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
@@ -26,7 +25,6 @@ import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
* A secure {@link org.apache.hadoop.fs.FileSystem} for reading and writing files stored on Windows Azure
*/
-@InterfaceAudience.Public
@InterfaceStability.Evolving
public class SecureAzureBlobFileSystem extends AzureBlobFileSystem {
@Override
@@ -38,4 +36,4 @@ public class SecureAzureBlobFileSystem extends AzureBlobFileSystem {
public String getScheme() {
return FileSystemUriSchemes.ABFS_SECURE_SCHEME;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
index 2ec4db0197c..f80bc605cb6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
- * Responsible to keep all constant keys used in abfs rest client here
+ * Responsible to keep all constant keys used in abfs rest client here.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java
index 9b7f9bc6ce7..4603b5fd03e 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
- * Responsible to keep all abfs http headers here
+ * Responsible to keep all abfs http headers here.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java
index a9f7d3350b7..f58d33a1302 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpQueryParams.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
- * Responsible to keep all Http Query params here
+ * Responsible to keep all Http Query params here.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java
index 462ebbc88f6..82c571a3b03 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java
@@ -25,12 +25,12 @@ import java.lang.annotation.Target;
import org.apache.hadoop.classification.InterfaceStability;
/**
- * Definitions of Annotations for all types of the validators
+ * Definitions of Annotations for all types of the validators.
*/
@InterfaceStability.Evolving
public class ConfigurationValidationAnnotations {
/**
- * Describes the requirements when validating the annotated int field
+ * Describes the requirements when validating the annotated int field.
*/
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@@ -47,7 +47,7 @@ public class ConfigurationValidationAnnotations {
}
/**
- * Describes the requirements when validating the annotated long field
+ * Describes the requirements when validating the annotated long field.
*/
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@@ -64,7 +64,7 @@ public class ConfigurationValidationAnnotations {
}
/**
- * Describes the requirements when validating the annotated String field
+ * Describes the requirements when validating the annotated String field.
*/
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@@ -77,7 +77,7 @@ public class ConfigurationValidationAnnotations {
}
/**
- * Describes the requirements when validating the annotated String field
+ * Describes the requirements when validating the annotated String field.
*/
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@@ -90,7 +90,7 @@ public class ConfigurationValidationAnnotations {
}
/**
- * Describes the requirements when validating the annotated boolean field
+ * Describes the requirements when validating the annotated boolean field.
*/
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@@ -101,4 +101,4 @@ public class ConfigurationValidationAnnotations {
boolean ThrowIfInvalid() default false;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/diagnostics/ConfigurationValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/diagnostics/ConfigurationValidator.java
index 796f7859567..d61229ee803 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/diagnostics/ConfigurationValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/diagnostics/ConfigurationValidator.java
@@ -18,19 +18,17 @@
package org.apache.hadoop.fs.azurebfs.contracts.diagnostics;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException;
/**
* ConfigurationValidator to validate the value of a configuration key
- * @param <T> the type of the validator and the validated value
+ * @param <T> the type of the validator and the validated value.
*/
-@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface ConfigurationValidator<T> {
/**
- * Validates the configValue
+ * Validates the configValue.
* @return validated value of type T
*/
T validate(String configValue) throws InvalidConfigurationValueException;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java
index f31c680628b..9b1bead886e 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/AzureBlobFileSystemException.java
@@ -40,7 +40,7 @@ public abstract class AzureBlobFileSystemException extends IOException {
@Override
public String toString() {
if (this.getMessage() == null && this.getCause() == null) {
- return "";
+ return "AzureBlobFileSystemException";
}
if (this.getCause() == null) {
@@ -53,4 +53,4 @@ public abstract class AzureBlobFileSystemException extends IOException {
return this.getMessage() + this.getCause().toString();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/InvalidUriException.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/InvalidUriException.java
index a84495afc61..4fa01509779 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/InvalidUriException.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/exceptions/InvalidUriException.java
@@ -28,6 +28,6 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Evolving
public final class InvalidUriException extends AzureBlobFileSystemException {
public InvalidUriException(String url) {
- super(String.format("%s is invalid.", url));
+ super(String.format("Invalid URI %s", url));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/Base64StringConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/Base64StringConfigurationBasicValidator.java
index 69288c5198f..6bb997bbc79 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/Base64StringConfigurationBasicValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/Base64StringConfigurationBasicValidator.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationVa
import org.apache.commons.codec.binary.Base64;
/**
-* String Base64 configuration value Validator
+* String Base64 configuration value Validator.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/BooleanConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/BooleanConfigurationBasicValidator.java
index c9927ff0856..b16abdd09b5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/BooleanConfigurationBasicValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/BooleanConfigurationBasicValidator.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException;
/**
- * Boolean configuration value validator
+ * Boolean configuration value validator.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@@ -47,4 +47,4 @@ public class BooleanConfigurationBasicValidator extends ConfigurationBasicValida
throw new InvalidConfigurationValueException(getConfigKey());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/ConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/ConfigurationBasicValidator.java
index 7da809cdcad..8555a29805a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/ConfigurationBasicValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/ConfigurationBasicValidator.java
@@ -38,7 +38,7 @@ abstract class ConfigurationBasicValidator<T> implements ConfigurationValidator
/**
* This method handles the base case where the configValue is null, based on the throwIfInvalid it either throws or returns the defaultVal,
- * otherwise it returns null indicating that the configValue needs to be validated further
+ * otherwise it returns null indicating that the configValue needs to be validated further.
* @param configValue the configuration value set by the user
* @return the defaultVal in case the configValue is null and not required to be set, null in case the configValue not null
* @throws InvalidConfigurationValueException in case the configValue is null and required to be set
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java
index ec38cd8ea47..26c7d2f0ac1 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/IntegerConfigurationBasicValidator.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.diagnostics.ConfigurationValidato
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException;
/**
- * Integer configuration value Validator
+ * Integer configuration value Validator.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/LongConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/LongConfigurationBasicValidator.java
index 559dbc0c49b..32ac14cea61 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/LongConfigurationBasicValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/LongConfigurationBasicValidator.java
@@ -17,15 +17,13 @@
*/
package org.apache.hadoop.fs.azurebfs.diagnostics;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.azurebfs.contracts.diagnostics.ConfigurationValidator;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException;
/**
- * Long configuration value Validator
+ * Long configuration value Validator.
*/
-@InterfaceAudience.Public
@InterfaceStability.Evolving
public class LongConfigurationBasicValidator extends ConfigurationBasicValidator<Long> implements ConfigurationValidator<Long> {
private final long min;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/StringConfigurationBasicValidator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/StringConfigurationBasicValidator.java
index d6f9c59e5d2..0d344d13434 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/StringConfigurationBasicValidator.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/diagnostics/StringConfigurationBasicValidator.java
@@ -18,15 +18,13 @@
package org.apache.hadoop.fs.azurebfs.diagnostics;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.azurebfs.contracts.diagnostics.ConfigurationValidator;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException;
/**
- * String configuration value Validator
+ * String configuration value Validator.
*/
-@InterfaceAudience.Public
@InterfaceStability.Evolving
public class StringConfigurationBasicValidator extends ConfigurationBasicValidator<String> implements ConfigurationValidator<String> {
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
index a78e7af4660..2b3ccc0472d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java
@@ -26,14 +26,17 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriException;
-import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
-import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
-import org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidUriException;
+
+
+import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.*;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.*;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpQueryParams.*;
+
/**
* AbfsClient
*/
@@ -53,7 +56,7 @@ public class AbfsClient {
this.baseUrl = baseUrl;
this.sharedKeyCredentials = sharedKeyCredentials;
String baseUrlString = baseUrl.toString();
- this.filesystem = baseUrlString.substring(baseUrlString.lastIndexOf(AbfsHttpConstants.FORWARD_SLASH) + 1);
+ this.filesystem = baseUrlString.substring(baseUrlString.lastIndexOf(FORWARD_SLASH) + 1);
this.abfsConfiguration = abfsConfiguration;
this.retryPolicy = exponentialRetryPolicy;
this.userAgent = initializeUserAgent();
@@ -73,19 +76,19 @@ public class AbfsClient {
List<AbfsHttpHeader> createDefaultHeaders() {
final List<AbfsHttpHeader> requestHeaders = new ArrayList<AbfsHttpHeader>();
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_VERSION, xMsVersion));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.ACCEPT, AbfsHttpConstants.APPLICATION_JSON
- + AbfsHttpConstants.COMMA + AbfsHttpConstants.SINGLE_WHITE_SPACE + AbfsHttpConstants.APPLICATION_OCTET_STREAM));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.ACCEPT_CHARSET,
- AbfsHttpConstants.UTF_8));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.CONTENT_TYPE, AbfsHttpConstants.EMPTY_STRING));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.USER_AGENT, userAgent));
+ requestHeaders.add(new AbfsHttpHeader(X_MS_VERSION, xMsVersion));
+ requestHeaders.add(new AbfsHttpHeader(ACCEPT, APPLICATION_JSON
+ + COMMA + SINGLE_WHITE_SPACE + APPLICATION_OCTET_STREAM));
+ requestHeaders.add(new AbfsHttpHeader(ACCEPT_CHARSET,
+ UTF_8));
+ requestHeaders.add(new AbfsHttpHeader(CONTENT_TYPE, EMPTY_STRING));
+ requestHeaders.add(new AbfsHttpHeader(USER_AGENT, userAgent));
return requestHeaders;
}
AbfsUriQueryBuilder createDefaultUriQueryBuilder() {
final AbfsUriQueryBuilder abfsUriQueryBuilder = new AbfsUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_TIMEOUT, AbfsHttpConstants.DEFAULT_TIMEOUT);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_TIMEOUT, DEFAULT_TIMEOUT);
return abfsUriQueryBuilder;
}
@@ -93,12 +96,12 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
final AbfsUriQueryBuilder abfsUriQueryBuilder = new AbfsUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RESOURCE, AbfsHttpConstants.FILESYSTEM);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, FILESYSTEM);
final URL url = createRequestUrl(abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders);
op.execute();
@@ -109,19 +112,19 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
// JDK7 does not support PATCH, so to workaround the issue we will use
// PUT and specify the real method in the X-Http-Method-Override header.
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_HTTP_METHOD_OVERRIDE,
- AbfsHttpConstants.HTTP_METHOD_PATCH));
+ requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE,
+ HTTP_METHOD_PATCH));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_PROPERTIES,
+ requestHeaders.add(new AbfsHttpHeader(X_MS_PROPERTIES,
properties));
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RESOURCE, AbfsHttpConstants.FILESYSTEM);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, FILESYSTEM);
final URL url = createRequestUrl(abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders);
op.execute();
@@ -133,16 +136,16 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RESOURCE, AbfsHttpConstants.FILESYSTEM);
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_DIRECTORY, relativePath == null ? "" : urlEncode(relativePath));
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RECURSIVE, String.valueOf(recursive));
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_CONTINUATION, continuation);
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_MAXRESULTS, String.valueOf(listMaxResults));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, FILESYSTEM);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_DIRECTORY, relativePath == null ? "" : urlEncode(relativePath));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RECURSIVE, String.valueOf(recursive));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_CONTINUATION, continuation);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_MAXRESULTS, String.valueOf(listMaxResults));
final URL url = createRequestUrl(abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_GET,
+ HTTP_METHOD_GET,
url,
requestHeaders);
op.execute();
@@ -153,12 +156,12 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RESOURCE, AbfsHttpConstants.FILESYSTEM);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, FILESYSTEM);
final URL url = createRequestUrl(abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_HEAD,
+ HTTP_METHOD_HEAD,
url,
requestHeaders);
op.execute();
@@ -169,12 +172,12 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RESOURCE, AbfsHttpConstants.FILESYSTEM);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, FILESYSTEM);
final URL url = createRequestUrl(abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_DELETE,
+ HTTP_METHOD_DELETE,
url,
requestHeaders);
op.execute();
@@ -185,16 +188,16 @@ public class AbfsClient {
throws AzureBlobFileSystemException {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
if (!overwrite) {
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.IF_NONE_MATCH, "*"));
+ requestHeaders.add(new AbfsHttpHeader(IF_NONE_MATCH, "*"));
}
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RESOURCE, isFile ? AbfsHttpConstants.FILE : AbfsHttpConstants.DIRECTORY);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RESOURCE, isFile ? FILE : DIRECTORY);
final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders);
op.execute();
@@ -205,17 +208,17 @@ public class AbfsClient {
throws AzureBlobFileSystemException {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
- final String encodedRenameSource = urlEncode(AbfsHttpConstants.FORWARD_SLASH + this.getFileSystem() + source);
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_RENAME_SOURCE, encodedRenameSource));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.IF_NONE_MATCH, AbfsHttpConstants.STAR));
+ final String encodedRenameSource = urlEncode(FORWARD_SLASH + this.getFileSystem() + source);
+ requestHeaders.add(new AbfsHttpHeader(X_MS_RENAME_SOURCE, encodedRenameSource));
+ requestHeaders.add(new AbfsHttpHeader(IF_NONE_MATCH, STAR));
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_CONTINUATION, continuation);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_CONTINUATION, continuation);
final URL url = createRequestUrl(destination, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders);
op.execute();
@@ -227,17 +230,17 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
// JDK7 does not support PATCH, so to workaround the issue we will use
// PUT and specify the real method in the X-Http-Method-Override header.
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_HTTP_METHOD_OVERRIDE,
- AbfsHttpConstants.HTTP_METHOD_PATCH));
+ requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE,
+ HTTP_METHOD_PATCH));
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.APPEND_ACTION);
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_POSITION, Long.toString(position));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, APPEND_ACTION);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_POSITION, Long.toString(position));
final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders, buffer, offset, length);
op.execute();
@@ -245,44 +248,46 @@ public class AbfsClient {
}
- public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData) throws AzureBlobFileSystemException {
+ public AbfsRestOperation flush(final String path, final long position, boolean retainUncommittedData)
+ throws AzureBlobFileSystemException {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
// JDK7 does not support PATCH, so to workaround the issue we will use
// PUT and specify the real method in the X-Http-Method-Override header.
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_HTTP_METHOD_OVERRIDE,
- AbfsHttpConstants.HTTP_METHOD_PATCH));
+ requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE,
+ HTTP_METHOD_PATCH));
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.FLUSH_ACTION);
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_POSITION, Long.toString(position));
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RETAIN_UNCOMMITTED_DATA, String.valueOf(retainUncommittedData));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, FLUSH_ACTION);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_POSITION, Long.toString(position));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RETAIN_UNCOMMITTED_DATA, String.valueOf(retainUncommittedData));
final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders);
op.execute();
return op;
}
- public AbfsRestOperation setPathProperties(final String path, final String properties) throws AzureBlobFileSystemException {
+ public AbfsRestOperation setPathProperties(final String path, final String properties)
+ throws AzureBlobFileSystemException {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
// JDK7 does not support PATCH, so to workaround the issue we will use
// PUT and specify the real method in the X-Http-Method-Override header.
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_HTTP_METHOD_OVERRIDE,
- AbfsHttpConstants.HTTP_METHOD_PATCH));
+ requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE,
+ HTTP_METHOD_PATCH));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_PROPERTIES, properties));
+ requestHeaders.add(new AbfsHttpHeader(X_MS_PROPERTIES, properties));
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.SET_PROPERTIES_ACTION);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_ACTION, SET_PROPERTIES_ACTION);
final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_PUT,
+ HTTP_METHOD_PUT,
url,
requestHeaders);
op.execute();
@@ -297,7 +302,7 @@ public class AbfsClient {
final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_HEAD,
+ HTTP_METHOD_HEAD,
url,
requestHeaders);
op.execute();
@@ -307,9 +312,9 @@ public class AbfsClient {
public AbfsRestOperation read(final String path, final long position, final byte[] buffer, final int bufferOffset,
final int bufferLength, final String eTag) throws AzureBlobFileSystemException {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.RANGE,
+ requestHeaders.add(new AbfsHttpHeader(RANGE,
String.format("bytes=%d-%d", position, position + bufferLength - 1)));
- requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.IF_MATCH, eTag));
+ requestHeaders.add(new AbfsHttpHeader(IF_MATCH, eTag));
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
@@ -317,7 +322,7 @@ public class AbfsClient {
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_GET,
+ HTTP_METHOD_GET,
url,
requestHeaders,
buffer,
@@ -333,13 +338,13 @@ public class AbfsClient {
final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_RECURSIVE, String.valueOf(recursive));
- abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_CONTINUATION, continuation);
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_RECURSIVE, String.valueOf(recursive));
+ abfsUriQueryBuilder.addQuery(QUERY_PARAM_CONTINUATION, continuation);
final URL url = createRequestUrl(path, abfsUriQueryBuilder.toString());
final AbfsRestOperation op = new AbfsRestOperation(
this,
- AbfsHttpConstants.HTTP_METHOD_DELETE,
+ HTTP_METHOD_DELETE,
url,
requestHeaders);
op.execute();
@@ -347,7 +352,7 @@ public class AbfsClient {
}
private URL createRequestUrl(final String query) throws AzureBlobFileSystemException {
- return createRequestUrl(AbfsHttpConstants.EMPTY_STRING, query);
+ return createRequestUrl(EMPTY_STRING, query);
}
private URL createRequestUrl(final String path, final String query)
@@ -357,8 +362,8 @@ public class AbfsClient {
try {
encodedPath = urlEncode(path);
} catch (AzureBlobFileSystemException ex) {
- this.LOG.debug(
- "Unexpected error.", ex);
+ LOG.debug("Unexpected error.", ex);
+ throw new InvalidUriException(path);
}
final StringBuilder sb = new StringBuilder();
@@ -378,9 +383,9 @@ public class AbfsClient {
private static String urlEncode(final String value) throws AzureBlobFileSystemException {
String encodedString = null;
try {
- encodedString = URLEncoder.encode(value, AbfsHttpConstants.UTF_8)
- .replace(AbfsHttpConstants.PLUS, AbfsHttpConstants.PLUS_ENCODE)
- .replace(AbfsHttpConstants.FORWARD_SLASH_ENCODE, AbfsHttpConstants.FORWARD_SLASH);
+ encodedString = URLEncoder.encode(value, UTF_8)
+ .replace(PLUS, PLUS_ENCODE)
+ .replace(FORWARD_SLASH_ENCODE, FORWARD_SLASH);
} catch (UnsupportedEncodingException ex) {
throw new InvalidUriException(value);
}
@@ -391,11 +396,11 @@ public class AbfsClient {
private String initializeUserAgent() {
final String userAgentComment = String.format(Locale.ROOT,
"(JavaJRE %s; %s %s)",
- System.getProperty(AbfsHttpConstants.JAVA_VERSION),
- System.getProperty(AbfsHttpConstants.OS_NAME)
- .replaceAll(AbfsHttpConstants.SINGLE_WHITE_SPACE, AbfsHttpConstants.EMPTY_STRING),
- System.getProperty(AbfsHttpConstants.OS_VERSION));
+ System.getProperty(JAVA_VERSION),
+ System.getProperty(OS_NAME)
+ .replaceAll(SINGLE_WHITE_SPACE, EMPTY_STRING),
+ System.getProperty(OS_VERSION));
- return String.format(AbfsHttpConstants.CLIENT_VERSION + " %s", userAgentComment);
+ return String.format(CLIENT_VERSION + " %s", userAgentComment);
}
-}
\ No newline at end of file
+}
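A recurring comment in this class is the PATCH workaround: `HttpURLConnection.setRequestMethod("PATCH")` throws `ProtocolException` on older JDKs, so the client sends PUT and names the real verb in a header the service honours. A minimal sketch of the idea (placeholder URL; this is not the `AbfsHttpOperation` code):

```java
import java.net.HttpURLConnection;
import java.net.URL;

public class PatchOverrideSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL(
        "https://account.dfs.core.windows.net/container/file?action=append");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // conn.setRequestMethod("PATCH") would throw ProtocolException on JDK7,
    // so send PUT and declare the real verb in the override header instead.
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("X-Http-Method-Override", "PATCH");
    conn.setDoOutput(true);
    System.out.println(conn.getRequestMethod()); // PUT on the wire
  }
}
```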
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java
index 0ea936569b9..53f69004d8c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsHttpOperation.java
@@ -30,12 +30,12 @@ import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Represents an HTTP operation.
@@ -427,4 +427,4 @@ public class AbfsHttpOperation {
private boolean isNullInputStream(InputStream stream) {
return stream == null ? true : false;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java
index 6554380ebd0..848ce8ac953 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsInputStream.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
/**
- * The AbfsInputStream for AbfsClient
+ * The AbfsInputStream for AbfsClient.
*/
public class AbfsInputStream extends FSInputStream {
private final AbfsClient client;
@@ -59,7 +59,6 @@ public class AbfsInputStream extends FSInputStream {
final int bufferSize,
final int readAheadQueueDepth,
final String eTag) {
- super();
this.client = client;
this.statistics = statistics;
this.path = path;
@@ -379,4 +378,4 @@ public class AbfsInputStream extends FSInputStream {
public boolean markSupported() {
return false;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java
index de5c934d64e..2dbcee57f59 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.fs.azurebfs.services;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.LinkedBlockingQueue;
@@ -35,7 +36,7 @@ import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
/**
- * The BlobFsOutputStream for Rest AbfsClient
+ * The BlobFsOutputStream for Rest AbfsClient.
*/
public class AbfsOutputStream extends OutputStream implements Syncable {
private final AbfsClient client;
@@ -79,8 +80,8 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
maxConcurrentRequestCount,
10L,
TimeUnit.SECONDS,
- new LinkedBlockingQueue());
- this.completionService = new ExecutorCompletionService(this.threadExecutor);
+ new LinkedBlockingQueue<>());
+ this.completionService = new ExecutorCompletionService<>(this.threadExecutor);
}
/**
@@ -111,9 +112,7 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
@Override
public synchronized void write(final byte[] data, final int off, final int length)
throws IOException {
- if (this.lastError != null) {
- throw this.lastError;
- }
+ maybeThrowLastError();
Preconditions.checkArgument(data != null, "null data");
@@ -142,6 +141,19 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
}
}
+ /**
+ * Throw the last error recorded if not null.
+ * After the stream is closed, this is always set to
+ * an exception, so acts as a guard against method invocation once
+ * closed.
+ * @throws IOException if lastError is set
+ */
+ private void maybeThrowLastError() throws IOException {
+ if (lastError != null) {
+ throw lastError;
+ }
+ }
+
/**
* Flushes this output stream and forces any buffered output bytes to be
* written out. If any data remains in the payload it is committed to the
@@ -150,7 +162,7 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
*/
@Override
public void flush() throws IOException {
- this.flushInternalAsync();
+ flushInternalAsync();
}
/** Similar to posix fsync, flush out the data in client's user buffer
@@ -159,7 +171,7 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
*/
@Override
public void hsync() throws IOException {
- this.flushInternal();
+ flushInternal();
}
/** Flush out the data in client's user buffer. After the return of
@@ -168,7 +180,7 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
*/
@Override
public void hflush() throws IOException {
- this.flushInternal();
+ flushInternal();
}
/**
@@ -186,34 +198,30 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
}
try {
- this.flushInternal();
- this.threadExecutor.shutdown();
+ flushInternal();
+ threadExecutor.shutdown();
} finally {
- this.lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
- this.buffer = null;
- this.bufferIndex = 0;
- this.closed = true;
- this.writeOperations.clear();
- if (!this.threadExecutor.isShutdown()) {
- this.threadExecutor.shutdownNow();
+ lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ buffer = null;
+ bufferIndex = 0;
+ closed = true;
+ writeOperations.clear();
+ if (!threadExecutor.isShutdown()) {
+ threadExecutor.shutdownNow();
}
}
}
private synchronized void flushInternal() throws IOException {
- if (this.lastError != null) {
- throw this.lastError;
- }
- this.writeCurrentBufferToService();
- this.flushWrittenBytesToService();
+ maybeThrowLastError();
+ writeCurrentBufferToService();
+ flushWrittenBytesToService();
}
private synchronized void flushInternalAsync() throws IOException {
- if (this.lastError != null) {
- throw this.lastError;
- }
- this.writeCurrentBufferToService();
- this.flushWrittenBytesToServiceAsync();
+ maybeThrowLastError();
+ writeCurrentBufferToService();
+ flushWrittenBytesToServiceAsync();
}
private synchronized void writeCurrentBufferToService() throws IOException {
@@ -221,19 +229,19 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
return;
}
- final byte[] bytes = this.buffer;
+ final byte[] bytes = buffer;
final int bytesLength = bufferIndex;
- this.buffer = new byte[bufferSize];
- this.bufferIndex = 0;
- final long offset = this.position;
- this.position += bytesLength;
+ buffer = new byte[bufferSize];
+ bufferIndex = 0;
+ final long offset = position;
+ position += bytesLength;
- if (this.threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
- this.waitForTaskToComplete();
+ if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
+ waitForTaskToComplete();
}
- final Future<Void> job = this.completionService.submit(new Callable<Void>() {
+ final Future<Void> job = completionService.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
client.append(path, offset, bytes, 0,
@@ -242,25 +250,25 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
}
});
- this.writeOperations.add(new WriteOperation(job, offset, bytesLength));
+ writeOperations.add(new WriteOperation(job, offset, bytesLength));
// Try to shrink the queue
shrinkWriteOperationQueue();
}
private synchronized void flushWrittenBytesToService() throws IOException {
- for (WriteOperation writeOperation : this.writeOperations) {
+ for (WriteOperation writeOperation : writeOperations) {
try {
writeOperation.task.get();
} catch (Exception ex) {
- if (AzureBlobFileSystemException.class.isInstance(ex.getCause())) {
- ex = AzureBlobFileSystemException.class.cast(ex.getCause());
+ if (ex.getCause() instanceof AzureBlobFileSystemException) {
+ ex = (AzureBlobFileSystemException)ex.getCause();
}
- this.lastError = new IOException(ex);
- throw this.lastError;
+ lastError = new IOException(ex);
+ throw lastError;
}
}
- flushWrittenBytesToServiceInternal(this.position, false);
+ flushWrittenBytesToServiceInternal(position, false);
}
private synchronized void flushWrittenBytesToServiceAsync() throws IOException {
@@ -273,7 +281,8 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
this.lastTotalAppendOffset = 0;
}
- private synchronized void flushWrittenBytesToServiceInternal(final long offset, final boolean retainUncommitedData) throws IOException {
+ private synchronized void flushWrittenBytesToServiceInternal(final long offset,
+ final boolean retainUncommitedData) throws IOException {
try {
client.flush(path, offset, retainUncommitedData);
} catch (AzureBlobFileSystemException ex) {
@@ -288,31 +297,33 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
*/
private synchronized void shrinkWriteOperationQueue() throws IOException {
try {
- while (this.writeOperations.peek() != null && this.writeOperations.peek().task.isDone()) {
- this.writeOperations.peek().task.get();
- this.lastTotalAppendOffset += this.writeOperations.peek().length;
- this.writeOperations.remove();
+ while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) {
+ writeOperations.peek().task.get();
+ lastTotalAppendOffset += writeOperations.peek().length;
+ writeOperations.remove();
}
} catch (Exception e) {
- if (AzureBlobFileSystemException.class.isInstance(e.getCause())) {
- this.lastError = IOException.class.cast(e.getCause());
+ if (e.getCause() instanceof AzureBlobFileSystemException) {
+ lastError = (AzureBlobFileSystemException)e.getCause();
} else {
- this.lastError = new IOException(e);
+ lastError = new IOException(e);
}
- throw this.lastError;
+ throw lastError;
}
}
private void waitForTaskToComplete() throws IOException {
boolean completed;
- for (completed = false; this.completionService.poll() != null; completed = true) {}
+ for (completed = false; completionService.poll() != null; completed = true) {
+ // keep polling until there is no data
+ }
if (!completed) {
try {
- this.completionService.take();
+ completionService.take();
} catch (InterruptedException e) {
- this.lastError = new IOException(e);
- throw this.lastError;
+ lastError = (IOException)new InterruptedIOException(e.toString()).initCause(e);
+ throw lastError;
}
}
}
@@ -332,4 +343,4 @@ public class AbfsOutputStream extends OutputStream implements Syncable {
this.length = length;
}
}
-}
\ No newline at end of file
+}
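The refactored stream keeps the same upload pipeline: buffered writes are submitted as `Callable<Void>` tasks, and `waitForTaskToComplete()` blocks on the `ExecutorCompletionService` once `maxConcurrentRequestCount * 2` tasks are queued. A self-contained sketch of that back-pressure pattern, with a toy task standing in for `client.append(...)`:

```java
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class BoundedUploadSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    CompletionService<Void> completions = new ExecutorCompletionService<>(pool);
    int outstanding = 0;
    for (int block = 0; block < 16; block++) {
      if (outstanding >= 8) {   // 2 x concurrency, as in the stream above
        completions.take();     // block until one upload finishes
        outstanding--;
      }
      completions.submit(() -> {
        Thread.sleep(10);       // stand-in for client.append(...)
        return null;
      });
      outstanding++;
    }
    while (outstanding-- > 0) { // drain, as flushWrittenBytesToService does
      completions.take().get();
    }
    pool.shutdown();
  }
}
```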
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
index 17fc35afcb6..61263985002 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
@@ -23,15 +23,16 @@ import java.net.HttpURLConnection;
import java.net.URL;
import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidAbfsRestOperationException;
-import org.slf4j.Logger;
-
/**
- * The AbfsRestOperation for Rest AbfsClient
+ * The AbfsRestOperation for Rest AbfsClient.
*/
public class AbfsRestOperation {
// Blob FS client, which has the credentials, retry policy, and logs.
@@ -47,7 +48,7 @@ public class AbfsRestOperation {
// request body and all the download methods have a response body.
private final boolean hasRequestBody;
- private final Logger logger;
+ private final Logger LOG = LoggerFactory.getLogger(AbfsClient.class);
// For uploads, this is the request entity body. For downloads,
// this will hold the response entity body.
@@ -79,7 +80,6 @@ public class AbfsRestOperation {
this.requestHeaders = requestHeaders;
this.hasRequestBody = (AbfsHttpConstants.HTTP_METHOD_PUT.equals(method)
|| AbfsHttpConstants.HTTP_METHOD_PATCH.equals(method));
- this.logger = client.LOG;
}
/**
@@ -150,11 +150,11 @@ public class AbfsRestOperation {
httpOperation.processResponse(buffer, bufferOffset, bufferLength);
} catch (IOException ex) {
- if (logger.isDebugEnabled()) {
+ if (LOG.isDebugEnabled()) {
if (httpOperation != null) {
- logger.debug("HttpRequestFailure: " + httpOperation.toString(), ex);
+ LOG.debug("HttpRequestFailure: " + httpOperation.toString(), ex);
} else {
- logger.debug("HttpRequestFailure: " + method + "," + url, ex);
+ LOG.debug("HttpRequestFailure: " + method + "," + url, ex);
}
}
if (!client.getRetryPolicy().shouldRetry(retryCount, -1)) {
@@ -163,8 +163,8 @@ public class AbfsRestOperation {
return false;
}
- if (logger.isDebugEnabled()) {
- logger.debug("HttpRequest: " + httpOperation.toString());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("HttpRequest: " + httpOperation.toString());
}
if (client.getRetryPolicy().shouldRetry(retryCount, httpOperation.getStatusCode())) {
@@ -175,4 +175,4 @@ public class AbfsRestOperation {
return true;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsUriQueryBuilder.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsUriQueryBuilder.java
index bac66af8824..36248533125 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsUriQueryBuilder.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsUriQueryBuilder.java
@@ -18,13 +18,13 @@
package org.apache.hadoop.fs.azurebfs.services;
-import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
-
import java.util.HashMap;
import java.util.Map;
+import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
+
/**
- * The UrlQueryBuilder for Rest AbfsClient
+ * The UrlQueryBuilder for Rest AbfsClient.
*/
public class AbfsUriQueryBuilder {
private Map<String, String> parameters;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java
index 54aa1abd698..5eb7a6639a6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ExponentialRetryPolicy.java
@@ -43,7 +43,7 @@ public class ExponentialRetryPolicy {
private static final int DEFAULT_MAX_BACKOFF = 1000 * 30;
/**
- *Represents the default minimum amount of time used when calculating the exponential
+ * Represents the default minimum amount of time used when calculating the exponential
* delay between retries.
*/
private static final int DEFAULT_MIN_BACKOFF = 1000 * 3;
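For orientation, a policy of this shape produces a retry interval that grows exponentially with the retry count, randomized, and clamped between the minimum and maximum backoff. A sketch under those assumptions (not the exact Hadoop formula; it also omits the HTTP status-code checks the real policy applies before retrying):

```java
import java.util.Random;

public class BackoffSketch {
  private static final int MIN_BACKOFF = 3 * 1000;   // cf. DEFAULT_MIN_BACKOFF
  private static final int MAX_BACKOFF = 30 * 1000;  // cf. DEFAULT_MAX_BACKOFF
  private static final Random RANDOM = new Random();

  static int retryInterval(int retryCount) {
    // randomized exponential growth, clamped to [MIN_BACKOFF, MAX_BACKOFF]
    double exponential = (Math.pow(2, retryCount) - 1)
        * MIN_BACKOFF * (0.8 + 0.4 * RANDOM.nextDouble());
    return (int) Math.min(MIN_BACKOFF + exponential, MAX_BACKOFF);
  }

  public static void main(String[] args) {
    for (int retry = 0; retry < 5; retry++) {
      System.out.println("retry " + retry + " -> " + retryInterval(retry) + " ms");
    }
  }
}
```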
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java
index 1fac13dcaa8..00e4f008ad0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBuffer.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.fs.azurebfs.services;
-import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus;
-
import java.util.concurrent.CountDownLatch;
+import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus;
+
class ReadBuffer {
private AbfsInputStream stream;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java
index 164e54992ae..5b71cf05225 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManager.java
@@ -28,7 +28,7 @@ import java.util.Stack;
import java.util.concurrent.CountDownLatch;
/**
- * The Read Buffer Manager for Rest AbfsClient
+ * The Read Buffer Manager for Rest AbfsClient.
*/
final class ReadBufferManager {
private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class);
@@ -40,11 +40,11 @@ final class ReadBufferManager {
private Thread[] threads = new Thread[NUM_THREADS];
private byte[][] buffers; // array of byte[] buffers, to hold the data that is read
- private Stack<Integer> freeList = new Stack<Integer>(); // indices in buffers[] array that are available
+ private Stack<Integer> freeList = new Stack<>(); // indices in buffers[] array that are available
- private Queue<ReadBuffer> readAheadQueue = new LinkedList<ReadBuffer>(); // queue of requests that are not picked up by any worker thread yet
- private LinkedList<ReadBuffer> inProgressList = new LinkedList<ReadBuffer>(); // requests being processed by worker threads
- private LinkedList<ReadBuffer> completedReadList = new LinkedList<ReadBuffer>(); // buffers available for reading
+ private Queue<ReadBuffer> readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet
+ private LinkedList<ReadBuffer> inProgressList = new LinkedList<>(); // requests being processed by worker threads
+ private LinkedList<ReadBuffer> completedReadList = new LinkedList<>(); // buffers available for reading
private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block
static {
@@ -85,7 +85,7 @@ final class ReadBufferManager {
/**
- * {@link AbfsInputStream} calls this method to queue read-aheads
+ * {@link AbfsInputStream} calls this method to queue read-aheads.
*
* @param stream The {@link AbfsInputStream} for which to do the read-ahead
* @param requestedOffset The offset in the file which should be read
@@ -93,15 +93,15 @@ final class ReadBufferManager {
*/
void queueReadAhead(final AbfsInputStream stream, final long requestedOffset, final int requestedLength) {
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Start Queueing readAhead for " + stream.getPath() + " offset " + requestedOffset
- + " length " + requestedLength);
+ LOGGER.trace("Start Queueing readAhead for {} offset {} length {}",
+ stream.getPath(), requestedOffset, requestedLength);
}
ReadBuffer buffer;
synchronized (this) {
if (isAlreadyQueued(stream, requestedOffset)) {
return; // already queued, do not queue again
}
- if (freeList.size() == 0 && !tryEvict()) {
+ if (freeList.isEmpty() && !tryEvict()) {
return; // no buffers available, cannot queue anything
}
@@ -121,8 +121,8 @@ final class ReadBufferManager {
notifyAll();
}
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Done q-ing readAhead for file " + stream.getPath() + " offset " + requestedOffset
- + " buffer idx " + buffer.getBufferindex());
+ LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}",
+ stream.getPath(), requestedOffset, buffer.getBufferindex());
}
}
@@ -144,7 +144,8 @@ final class ReadBufferManager {
int getBlock(final AbfsInputStream stream, final long position, final int length, final byte[] buffer) {
// not synchronized, so have to be careful with locking
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("getBlock for file " + stream.getPath() + " position " + position + " thread " + Thread.currentThread().getName());
+ LOGGER.trace("getBlock for file {} position {} thread {}",
+ stream.getPath(), position, Thread.currentThread().getName());
}
waitForProcess(stream, position);
@@ -155,12 +156,13 @@ final class ReadBufferManager {
}
if (bytesRead > 0) {
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Done read from Cache for " + stream.getPath() + " position " + position + " length " + bytesRead);
+ LOGGER.trace("Done read from Cache for {} position {} length {}",
+ stream.getPath(), position, bytesRead);
}
return bytesRead;
}
- // otherwise, just say we got nothing - calling thread can do it's own read
+ // otherwise, just say we got nothing - calling thread can do its own read
return 0;
}
@@ -179,8 +181,8 @@ final class ReadBufferManager {
if (readBuf != null) { // if in in-progress queue, then block for it
try {
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("got a relevant read buffer for file " + stream.getPath() + " offset " + readBuf.getOffset()
- + " buffer idx " + readBuf.getBufferindex());
+ LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}",
+ stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
}
readBuf.getLatch().await(); // blocking wait on the caller stream's thread
// Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
@@ -193,8 +195,8 @@ final class ReadBufferManager {
Thread.currentThread().interrupt();
}
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("latch done for file " + stream.getPath() + " buffer idx " + readBuf.getBufferindex()
- + " length " + readBuf.getLength());
+ LOGGER.trace("latch done for file {} buffer idx {} length {}",
+ stream.getPath(), readBuf.getBufferindex(), readBuf.getLength());
}
}
}
@@ -254,8 +256,8 @@ final class ReadBufferManager {
freeList.push(buf.getBufferindex());
completedReadList.remove(buf);
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Evicting buffer idx " + buf.getBufferindex() + "; was used for file " + buf.getStream().getPath()
- + " offset " + buf.getOffset() + " length " + buf.getLength());
+ LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}",
+ buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength());
}
return true;
}
@@ -344,13 +346,14 @@ final class ReadBufferManager {
inProgressList.add(buffer);
}
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("ReadBufferWorker picked file " + buffer.getStream().getPath() + " for offset " + buffer.getOffset());
+ LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
+ buffer.getStream().getPath(), buffer.getOffset());
}
return buffer;
}
/**
- * ReadBufferWorker thread calls this method to post completion
+ * ReadBufferWorker thread calls this method to post completion.
*
* @param buffer the buffer whose read was completed
* @param result the {@link ReadBufferStatus} after the read operation in the worker thread
@@ -358,8 +361,8 @@ final class ReadBufferManager {
*/
void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("ReadBufferWorker completed file " + buffer.getStream().getPath() + " for offset " + buffer.getOffset()
- + " bytes " + bytesActuallyRead);
+ LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}",
+ buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead);
}
synchronized (this) {
inProgressList.remove(buffer);
@@ -380,8 +383,9 @@ final class ReadBufferManager {
/**
* Similar to System.currentTimeMillis, except implemented with System.nanoTime().
* System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
- * making it unsuitable for measuring time intervals. nanotime is strictly monotonically increasing,
- * so it is much more suitable to measuring intervals.
+ * making it unsuitable for measuring time intervals. nanoTime is strictly monotonically increasing per CPU core.
+ * Note: it is not monotonic across sockets, and even within a CPU, it's only the
+ * more recent parts which share a clock across all cores.
*
* @return current time in milliseconds
*/
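The javadoc above concerns measuring intervals, not wall time. A minimal sketch of a nanoTime-based millisecond clock of the kind this method wraps (assumed shape, shown for illustration only):

```java
import java.util.concurrent.TimeUnit;

public class MonotonicClockSketch {
  // Suitable only for intervals: the zero point is arbitrary, but the value
  // never jumps backwards the way System.currentTimeMillis can under NTP.
  static long monotonicMillis() {
    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
  }

  public static void main(String[] args) throws InterruptedException {
    long start = monotonicMillis();
    Thread.sleep(50);
    System.out.println("elapsed ~" + (monotonicMillis() - start) + " ms");
  }
}
```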
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java
index 2d0c96e15c0..af69de0f089 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferWorker.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.fs.azurebfs.services;
-import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus;
-
import java.util.concurrent.CountDownLatch;
+import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus;
+
class ReadBufferWorker implements Runnable {
protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1);
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SharedKeyCredentials.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SharedKeyCredentials.java
index dd598921915..105a1a2dbf6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SharedKeyCredentials.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SharedKeyCredentials.java
@@ -22,6 +22,7 @@ import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.io.UnsupportedEncodingException;
import java.net.HttpURLConnection;
+import java.net.URL;
import java.net.URLDecoder;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
@@ -38,11 +39,11 @@ import java.util.TimeZone;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.commons.codec.Charsets;
+import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.codec.Charsets;
/**
* Represents the shared key credentials used to access an Azure Storage
* account.
@@ -89,7 +90,7 @@ public class SharedKeyCredentials {
}
private String computeHmac256(final String stringToSign) {
- byte[] utf8Bytes = null;
+ byte[] utf8Bytes;
try {
utf8Bytes = stringToSign.getBytes(AbfsHttpConstants.UTF_8);
} catch (final UnsupportedEncodingException e) {
@@ -158,7 +159,7 @@ public class SharedKeyCredentials {
}
/**
- * Initialie the HmacSha256 associated with the account key.
+ * Initialize the HmacSha256 associated with the account key.
*/
private void initializeMac() {
// Initializes the HMAC-SHA256 Mac and SecretKey.
@@ -171,7 +172,7 @@ public class SharedKeyCredentials {
}
/**
- * Append a string to a string builder with a newline constant
+ * Append a string to a string builder with a newline constant.
*
* @param builder the StringBuilder object
* @param element the string to append.
@@ -194,9 +195,10 @@ public class SharedKeyCredentials {
* @param conn the HttpURLConnection for the operation.
* @return A canonicalized string.
*/
- private static String canonicalizeHttpRequest(final java.net.URL address, final String accountName,
- final String method, final String contentType, final long contentLength, final String date,
- final HttpURLConnection conn) throws UnsupportedEncodingException {
+ private static String canonicalizeHttpRequest(final URL address,
+ final String accountName, final String method, final String contentType,
+ final long contentLength, final String date, final HttpURLConnection conn)
+ throws UnsupportedEncodingException {
// The first element should be the Method of the request.
// I.e. GET, POST, PUT, or HEAD.
@@ -246,7 +248,8 @@ public class SharedKeyCredentials {
* @param accountName the account name for the request.
* @return the canonicalized resource string.
*/
- private static String getCanonicalizedResource(final java.net.URL address, final String accountName) throws UnsupportedEncodingException {
+ private static String getCanonicalizedResource(final URL address,
+ final String accountName) throws UnsupportedEncodingException {
// Resource path
final StringBuilder resourcepath = new StringBuilder(AbfsHttpConstants.FORWARD_SLASH);
resourcepath.append(accountName);
@@ -263,7 +266,7 @@ public class SharedKeyCredentials {
final Map<String, String[]> queryVariables = parseQueryString(address.getQuery());
- final Map<String, String> lowercasedKeyNameValue = new HashMap<String, String>();
+ final Map<String, String> lowercasedKeyNameValue = new HashMap<>();
for (final Entry<String, String[]> entry : queryVariables.entrySet()) {
// sort the value and organize it as comma separated values
@@ -303,14 +306,17 @@ public class SharedKeyCredentials {
}
/**
- * Gets all the values for the given header in the one to many map, performs a trimStart() on each return value
+ * Gets all the values for the given header in the one to many map,
+ * performs a trimStart() on each return value.
*
* @param headers a one to many map of key / values representing the header values for the connection.
* @param headerName the name of the header to lookup
* @return an ArrayList of all trimmed values corresponding to the requested headerName. This may be empty
* if the header is not found.
*/
- private static ArrayList<String> getHeaderValues(final Map<String, List<String>> headers, final String headerName) {
+ private static ArrayList<String> getHeaderValues(
+ final Map<String, List<String>> headers,
+ final String headerName) {
final ArrayList<String> arrayOfValues = new ArrayList<String>();
List<String> values = null;
@@ -338,7 +344,7 @@ public class SharedKeyCredentials {
* @return a HashMap of the key values.
*/
private static HashMap<String, String[]> parseQueryString(String parseString) throws UnsupportedEncodingException {
- final HashMap<String, String[]> retVals = new HashMap<String, String[]>();
+ final HashMap<String, String[]> retVals = new HashMap<>();
if (parseString == null || parseString.isEmpty()) {
return retVals;
}
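The core of `computeHmac256` and `initializeMac` is standard JCE HMAC-SHA256 over the canonicalized request, keyed by the Base64-decoded account key. A self-contained sketch with a toy key and string-to-sign (the real canonicalization is shown in the diff above):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class HmacSigningSketch {
  public static void main(String[] args) throws Exception {
    // toy values; the real key is the storage account key from configuration
    byte[] accountKey = Base64.getDecoder().decode("c2VjcmV0LWFjY291bnQta2V5");
    String stringToSign = "PUT\n\napplication/octet-stream\n/account/container";

    Mac hmac = Mac.getInstance("HmacSHA256");
    hmac.init(new SecretKeySpec(accountKey, "HmacSHA256"));
    byte[] signature = hmac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8));
    System.out.println(Base64.getEncoder().encodeToString(signature));
  }
}
```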
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
new file mode 100644
index 00000000000..a4b3483de70
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
@@ -0,0 +1,72 @@
+
+
+# Hadoop Azure Support: ABFS — Azure Data Lake Storage Gen2
+
+
+
+## Introduction
+
+The `hadoop-azure` module provides support for the Azure Data Lake Storage Gen2
+storage layer through the "abfs" connector
+
+To make it part of Apache Hadoop's default classpath, simply make sure that
+`HADOOP_OPTIONAL_TOOLS` in `hadoop-env.sh` has `hadoop-azure` in the list.
+
+## Features
+
+* Read and write data stored in an Azure Blob Storage account.
+* *Fully Consistent* view of the storage across all clients.
+* Can read data written through the wasb: connector.
+* Present a hierarchical file system view by implementing the standard Hadoop
+ [`FileSystem`](../api/org/apache/hadoop/fs/FileSystem.html) interface.
+* Supports configuration of multiple Azure Blob Storage accounts.
+* Can act as a source or destination of data in Hadoop MapReduce, Apache Hive, and Apache Spark.
+* Tested at scale on both Linux and Windows.
+* Can be used as a replacement for HDFS on Hadoop clusters deployed in Azure infrastructure.
+
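+As an illustration of the `FileSystem` interface bullet above, here is a
+minimal sketch of reading a file through the connector (account, container
+and path are placeholders):
+
+```java
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class AbfsReadSketch {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem fs = FileSystem.get(
+        new URI("abfs://container@account.dfs.core.windows.net/"), conf);
+    try (FSDataInputStream in = fs.open(new Path("/data/sample.txt"))) {
+      System.out.println(in.read());   // first byte, or -1 at EOF
+    }
+  }
+}
+```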
+
+
+## Limitations
+
+* File last access time is not tracked.
+
+
+## Technical notes
+
+### Security
+
+### Consistency and Concurrency
+
+*TODO*: complete/review
+
+The abfs client has a fully consistent view of the store, with complete Create, Read, Update, and Delete consistency for both data and metadata.
+(Compare and contrast with S3, which only offers Create consistency; S3Guard adds CRUD consistency to the metadata, but not the underlying data.)
+
+### Performance
+
+*TODO*: check these.
+
+* File Rename: `O(1)`.
+* Directory Rename: `O(files)`.
+* Directory Delete: `O(files)`.
+
+## Testing ABFS
+
+See the relevant section in [Testing Azure](testing_azure.html).
+
+## References
+
+* [A closer look at Azure Data Lake Storage Gen2](https://azure.microsoft.com/en-gb/blog/a-closer-look-at-azure-data-lake-storage-gen2/);
+MSDN Article from June 28, 2018.
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
index b58e68be5f3..c148807aaa0 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/testing_azure.md
@@ -574,3 +574,79 @@ mvn test -Dtest=CleanupTestContainers
This will delete the containers; the output log of the test run will
provide the details and summary of the operation.
+
+
+## Testing ABFS
+
+The ABFS Connector tests share the same account as the wasb tests; this is
+needed for cross-connector compatibility tests.
+
+This makes for a somewhat complex set of configuration options.
+
+Here are the settings for an account `ACCOUNTNAME`:
+
+```xml
+<property>
+  <name>abfs.account.name</name>
+  <value>ACCOUNTNAME</value>
+</property>
+
+<property>
+  <name>abfs.account.full.name</name>
+  <value>${abfs.account.name}.dfs.core.windows.net</value>
+</property>
+
+<property>
+  <name>abfs.account.key</name>
+  <value>SECRETKEY==</value>
+</property>
+
+<property>
+  <name>fs.azure.account.key.ACCOUNTNAME.dfs.core.windows.net</name>
+  <value>${abfs.account.key}</value>
+</property>
+
+<property>
+  <name>fs.azure.account.key.ACCOUNTNAME.blob.core.windows.net</name>
+  <value>${abfs.account.key}</value>
+</property>
+
+<property>
+  <name>fs.azure.test.account.key.ACCOUNTNAME.dfs.core.windows.net</name>
+  <value>${abfs.account.key}</value>
+</property>
+
+<property>
+  <name>fs.azure.test.account.key.ACCOUNTNAME.blob.core.windows.net</name>
+  <value>${abfs.account.key}</value>
+</property>
+
+<property>
+  <name>fs.azure.account.key.ACCOUNTNAME</name>
+  <value>${abfs.account.key}</value>
+</property>
+
+<property>
+  <name>fs.azure.test.account.key.ACCOUNTNAME</name>
+  <value>${abfs.account.key}</value>
+</property>
+
+<property>
+  <name>fs.azure.test.account.name</name>
+  <value>${abfs.account.full.name}</value>
+</property>
+
+<property>
+  <name>fs.contract.test.fs.abfs</name>
+  <value>abfs://TESTCONTAINER@ACCOUNTNAME.dfs.core.windows.net</value>
+  <description>Container for contract tests</description>
+</property>
+
+<property>
+  <name>fs.contract.test.fs.abfss</name>
+  <value>abfss://TESTCONTAINER@ACCOUNTNAME.dfs.core.windows.net</value>
+  <description>Container for contract tests</description>
+</property>
+
+```
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
index fd21bd20b2e..db4a843287a 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractAppend.java
@@ -18,10 +18,19 @@
package org.apache.hadoop.fs.azure.contract;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import org.junit.Test;
+
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Append test, skipping one of them.
@@ -38,4 +47,18 @@ public class ITestAzureNativeContractAppend extends AbstractContractAppendTest {
public void testRenameFileBeingAppended() throws Throwable {
skip("Skipping as renaming an opened file is not supported");
}
+
+ /**
+ * Wasb returns a different exception, so change the intercept logic here.
+ */
+ @Override
+ @Test
+ public void testAppendDirectory() throws Exception {
+ final FileSystem fs = getFileSystem();
+
+ final Path folderPath = path("testAppendDirectory");
+ fs.mkdirs(folderPath);
+ intercept(FileNotFoundException.class,
+ () -> fs.append(folderPath));
+ }
}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
new file mode 100644
index 00000000000..106fa09e438
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Hashtable;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+import com.google.common.base.Preconditions;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.AbstractWasbTestWithTimeout;
+import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
+import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
+import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import org.apache.hadoop.fs.azurebfs.utils.UriUtils;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.*;
+import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.FILE_SYSTEM_NOT_FOUND;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.junit.Assume.assumeTrue;
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ * Base for AzureBlobFileSystem Integration tests.
+ *
+ * Important: This is for integration tests only.
+ */
+public abstract class AbstractAbfsIntegrationTest extends
+ AbstractWasbTestWithTimeout {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractAbfsIntegrationTest.class);
+
+ private final boolean isEmulator;
+ private NativeAzureFileSystem wasb;
+ private AzureBlobFileSystem abfs;
+ private String abfsScheme;
+
+ private Configuration configuration;
+ private String fileSystemName;
+ private String accountName;
+ private String testUrl;
+
+ protected AbstractAbfsIntegrationTest(final boolean secure) {
+ this(secure ? FileSystemUriSchemes.ABFS_SECURE_SCHEME : FileSystemUriSchemes.ABFS_SCHEME);
+ }
+
+ protected AbstractAbfsIntegrationTest() {
+ this(FileSystemUriSchemes.ABFS_SCHEME);
+ }
+
+ private AbstractAbfsIntegrationTest(final String scheme) {
+ abfsScheme = scheme;
+ fileSystemName = ABFS_TEST_CONTAINER_PREFIX + UUID.randomUUID().toString();
+ configuration = new Configuration();
+ configuration.addResource(ABFS_TEST_RESOURCE_XML);
+
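+ // Skip (rather than fail) when the test account name or its key
+ // has not been configured.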
+ String accountName = configuration.get(FS_AZURE_TEST_ACCOUNT_NAME, "");
+ assumeTrue("Not set: " + FS_AZURE_TEST_ACCOUNT_NAME,
+ !accountName.isEmpty());
+ assertThat("The key in " + FS_AZURE_TEST_ACCOUNT_KEY_PREFIX
+ + " is not bound to an ABFS account",
+ accountName, containsString("dfs.core.windows.net"));
+ String fullKey = FS_AZURE_TEST_ACCOUNT_KEY_PREFIX
+ + accountName;
+ assumeTrue("Not set: " + fullKey,
+ configuration.get(fullKey) != null);
+
+ final String abfsUrl = this.getFileSystemName() + "@" + this.getAccountName();
+ URI defaultUri = null;
+
+ try {
+ defaultUri = new URI(abfsScheme, abfsUrl, null, null, null);
+ } catch (Exception ex) {
+ throw new AssertionError(ex);
+ }
+
+ this.testUrl = defaultUri.toString();
+ configuration.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultUri.toString());
+ configuration.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true);
+ this.isEmulator = this.configuration.getBoolean(FS_AZURE_EMULATOR_ENABLED, false);
+ this.accountName = this.configuration.get(FS_AZURE_TEST_ACCOUNT_NAME);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ //Create filesystem first to make sure getWasbFileSystem() can return an existing filesystem.
+ createFileSystem();
+
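+ // Bind a wasb:// client to the same account for back-compat tests;
+ // emulator runs skip this step.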
+ if (!isEmulator) {
+ final URI wasbUri = new URI(abfsUrlToWasbUrl(getTestUrl()));
+ final AzureNativeFileSystemStore azureNativeFileSystemStore =
+ new AzureNativeFileSystemStore();
+ azureNativeFileSystemStore.initialize(
+ wasbUri,
+ getConfiguration(),
+ new AzureFileSystemInstrumentation(getConfiguration()));
+
+ wasb = new NativeAzureFileSystem(azureNativeFileSystemStore);
+ wasb.initialize(wasbUri, configuration);
+ }
+ }
+
+ @After
+ public void teardown() throws Exception {
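+ // Best-effort cleanup: delete the test filesystem and verify it is
+ // gone; failures are logged rather than rethrown so that teardown
+ // problems never mask the result of the test itself.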
+ try {
+ IOUtils.closeStream(wasb);
+ wasb = null;
+
+ if (abfs == null) {
+ return;
+ }
+
+ final AzureBlobFileSystemStore abfsStore = abfs.getAbfsStore();
+ abfsStore.deleteFilesystem();
+
+ AbfsRestOperationException ex = intercept(
+ AbfsRestOperationException.class,
+ new Callable<Hashtable<String, String>>() {
+ @Override
+ public Hashtable<String, String> call() throws Exception {
+ return abfsStore.getFilesystemProperties();
+ }
+ });
+ if (FILE_SYSTEM_NOT_FOUND.getStatusCode() != ex.getStatusCode()) {
+ LOG.warn("Deleted test filesystem may still exist: {}", abfs, ex);
+ }
+ } catch (Exception e) {
+ LOG.warn("During cleanup: {}", e, e);
+ } finally {
+ IOUtils.closeStream(abfs);
+ abfs = null;
+ }
+ }
+
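+ /**
+ * Get the test filesystem; null until {@link #createFileSystem()} has run.
+ * @return the test ABFS instance.
+ */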
+ public AzureBlobFileSystem getFileSystem() throws IOException {
+ return abfs;
+ }
+
+ /**
+ * Creates the filesystem; updates the {@link #abfs} field.
+ * @return the created filesystem.
+ * @throws IOException failure during create/init.
+ */
+ public AzureBlobFileSystem createFileSystem() throws IOException {
+ Preconditions.checkState(abfs == null,
+ "existing ABFS instance exists: %s", abfs);
+ abfs = (AzureBlobFileSystem) FileSystem.newInstance(configuration);
+ return abfs;
+ }
+
+ protected NativeAzureFileSystem getWasbFileSystem() {
+ return wasb;
+ }
+
+ protected String getHostName() {
+ return configuration.get(FS_AZURE_TEST_HOST_NAME);
+ }
+
+ protected void setTestUrl(String testUrl) {
+ this.testUrl = testUrl;
+ }
+
+ protected String getTestUrl() {
+ return testUrl;
+ }
+
+ protected void setFileSystemName(String fileSystemName) {
+ this.fileSystemName = fileSystemName;
+ }
+ protected String getFileSystemName() {
+ return fileSystemName;
+ }
+
+ protected String getAccountName() {
+ return configuration.get(FS_AZURE_TEST_ACCOUNT_NAME);
+ }
+
+ protected String getAccountKey() {
+ return configuration.get(
+ FS_AZURE_TEST_ACCOUNT_KEY_PREFIX
+ + getAccountName());
+ }
+
+ protected Configuration getConfiguration() {
+ return configuration;
+ }
+
+ protected boolean isEmulator() {
+ return isEmulator;
+ }
+
+ /**
+ * Write a buffer to a file.
+ * @param path path
+ * @param buffer buffer
+ * @throws IOException failure
+ */
+ protected void write(Path path, byte[] buffer) throws IOException {
+ ContractTestUtils.writeDataset(getFileSystem(), path, buffer, buffer.length,
+ CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT, false);
+ }
+
+ /**
+ * Touch a file in the test store. Will overwrite any existing file.
+ * @param path path
+ * @throws IOException failure.
+ */
+ protected void touch(Path path) throws IOException {
+ ContractTestUtils.touch(getFileSystem(), path);
+ }
+
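+ /**
+ * Convert a wasb:// test URL to the equivalent abfs:// URL by swapping
+ * both the URI scheme and the DNS prefix.
+ */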
+ protected static String wasbUrlToAbfsUrl(final String wasbUrl) {
+ return convertTestUrls(
+ wasbUrl, FileSystemUriSchemes.WASB_SCHEME, FileSystemUriSchemes.WASB_SECURE_SCHEME, FileSystemUriSchemes.WASB_DNS_PREFIX,
+ FileSystemUriSchemes.ABFS_SCHEME, FileSystemUriSchemes.ABFS_SECURE_SCHEME, FileSystemUriSchemes.ABFS_DNS_PREFIX);
+ }
+
+ protected static String abfsUrlToWasbUrl(final String abfsUrl) {
+ return convertTestUrls(
+ abfsUrl, FileSystemUriSchemes.ABFS_SCHEME, FileSystemUriSchemes.ABFS_SECURE_SCHEME, FileSystemUriSchemes.ABFS_DNS_PREFIX,
+ FileSystemUriSchemes.WASB_SCHEME, FileSystemUriSchemes.WASB_SECURE_SCHEME, FileSystemUriSchemes.WASB_DNS_PREFIX);
+ }
+
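+ /**
+ * Swap the scheme and DNS prefix of a test URL; returns null when the
+ * URL starts with neither of the source schemes.
+ */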
+ private static String convertTestUrls(
+ final String url,
+ final String fromNonSecureScheme,
+ final String fromSecureScheme,
+ final String fromDnsPrefix,
+ final String toNonSecureScheme,
+ final String toSecureScheme,
+ final String toDnsPrefix) {
+ String data = null;
+ if (url.startsWith(fromNonSecureScheme + "://")) {
+ data = url.replace(fromNonSecureScheme + "://", toNonSecureScheme + "://");
+ } else if (url.startsWith(fromSecureScheme + "://")) {
+ data = url.replace(fromSecureScheme + "://", toSecureScheme + "://");
+ }
+
+ if (data != null) {
+ data = data.replace("." + fromDnsPrefix + ".",
+ "." + toDnsPrefix + ".");
+ }
+ return data;
+ }
+
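+ /**
+ * @return a unique test path for this test run.
+ */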
+ public Path getTestPath() {
+ Path path = new Path(UriUtils.generateUniqueTestPath());
+ return path;
+ }
+
+ /**
+ * Create a path under the test path provided by
+ * {@link #getTestPath()}.
+ * @param filepath path string under the test path
+ * @return a path qualified by the test filesystem
+ * @throws IOException IO problems
+ */
+ protected Path path(String filepath) throws IOException {
+ return getFileSystem().makeQualified(
+ new Path(getTestPath(), filepath));
+ }
+
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java
new file mode 100644
index 00000000000..cfda7a7eef9
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
+
+import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled;
+
+/**
+ * Integration tests at bigger scale; configurable as to
+ * size, off by default.
+ */
+public class AbstractAbfsScaleTest extends AbstractAbfsIntegrationTest {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(AbstractAbfsScaleTest.class);
+
+ @Override
+ protected int getTestTimeoutMillis() {
+ return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
+ }
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ LOG.debug("Scale test operation count = {}", getOperationCount());
+ assumeScaleTestsEnabled(getConfiguration());
+ }
+
+ protected long getOperationCount() {
+ return getConfiguration().getLong(AzureTestConstants.KEY_OPERATION_COUNT,
+ AzureTestConstants.DEFAULT_OPERATION_COUNT);
+ }
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/DependencyInjectedTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/DependencyInjectedTest.java
deleted file mode 100644
index 74a530c9cce..00000000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/DependencyInjectedTest.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azurebfs;
-
-import java.net.URI;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
-import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
-import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
-import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
-import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
-import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys;
-
-import static org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.FILE_SYSTEM_NOT_FOUND;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assume.assumeNotNull;
-
-/**
- * Provide dependencies for AzureBlobFileSystem tests.
- */
-public abstract class DependencyInjectedTest {
- private final boolean isEmulator;
- private NativeAzureFileSystem wasb;
- private String abfsScheme;
-
- private Configuration configuration;
- private String fileSystemName;
- private String accountName;
- private String testUrl;
-
- public static final String TEST_CONTAINER_PREFIX = "abfs-testcontainer-";
-
- public DependencyInjectedTest(final boolean secure) {
- this(secure ? FileSystemUriSchemes.ABFS_SECURE_SCHEME : FileSystemUriSchemes.ABFS_SCHEME);
- }
-
- protected DependencyInjectedTest() {
- this(FileSystemUriSchemes.ABFS_SCHEME);
- }
-
- private DependencyInjectedTest(final String scheme) {
- abfsScheme = scheme;
- fileSystemName = TEST_CONTAINER_PREFIX + UUID.randomUUID().toString();
- configuration = new Configuration();
- configuration.addResource("azure-bfs-test.xml");
-
- assumeNotNull(configuration.get(TestConfigurationKeys.FS_AZURE_TEST_ACCOUNT_NAME));
- assumeNotNull(configuration.get(TestConfigurationKeys.FS_AZURE_TEST_ACCOUNT_KEY_PREFIX + configuration.get(TestConfigurationKeys
- .FS_AZURE_TEST_ACCOUNT_NAME)));
-
- final String abfsUrl = this.getFileSystemName() + "@" + this.getAccountName();
- URI defaultUri = null;
-
- try {
- defaultUri = new URI(abfsScheme, abfsUrl, null, null, null);
- } catch (Exception ex) {
- Assert.fail(ex.getMessage());
- }
-
- this.testUrl = defaultUri.toString();
- configuration.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultUri.toString());
- configuration.setBoolean(ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true);
- this.isEmulator = this.configuration.getBoolean(ConfigurationKeys.FS_AZURE_EMULATOR_ENABLED, false);
- this.accountName = this.configuration.get(TestConfigurationKeys.FS_AZURE_TEST_ACCOUNT_NAME);
- }
-
- @Before
- public void initialize() throws Exception {
- //Create filesystem first to make sure getWasbFileSystem() can return an existed filesystem.
- this.getFileSystem();
-
- if (!this.isEmulator) {
- final URI wasbUri = new URI(abfsUrlToWasbUrl(this.getTestUrl()));
- final AzureNativeFileSystemStore azureNativeFileSystemStore = new AzureNativeFileSystemStore();
- azureNativeFileSystemStore.initialize(
- wasbUri,
- this.getConfiguration(),
- new AzureFileSystemInstrumentation(this.getConfiguration()));
-
- this.wasb = new NativeAzureFileSystem(azureNativeFileSystemStore);
- this.wasb.initialize(wasbUri, configuration);
- }
- }
-
- @After
- public void testCleanup() throws Exception {
- if (this.wasb != null) {
- this.wasb.close();
- }
-
- FileSystem.closeAll();
-
- final AzureBlobFileSystem fs = this.getFileSystem();
- final AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
- abfsStore.deleteFilesystem();
-
- AbfsRestOperationException ex = intercept(
- AbfsRestOperationException.class,
- new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- fs.getAbfsStore().getFilesystemProperties();
- return null;
- }
- });
-
- assertEquals(FILE_SYSTEM_NOT_FOUND.getStatusCode(), ex.getStatusCode());
- }
-
- public AzureBlobFileSystem getFileSystem() throws Exception {
- return (AzureBlobFileSystem) FileSystem.get(this.configuration);
- }
-
- protected NativeAzureFileSystem getWasbFileSystem() {
- return this.wasb;
- }
-
- protected String getHostName() {
- return configuration.get(TestConfigurationKeys.FS_AZURE_TEST_HOST_NAME);
- }
-
- protected void updateTestUrl(String testUrl) {
- this.testUrl = testUrl;
- }
- protected String getTestUrl() {
- return testUrl;
- }
-
- protected void updateFileSystemName(String fileSystemName) {
- this.fileSystemName = fileSystemName;
- }
- protected String getFileSystemName() {
- return fileSystemName;
- }
-
- protected String getAccountName() {
- return configuration.get(TestConfigurationKeys.FS_AZURE_TEST_ACCOUNT_NAME);
- }
-
- protected String getAccountKey() {
- return configuration.get(
- TestConfigurationKeys.FS_AZURE_TEST_ACCOUNT_KEY_PREFIX
- + getAccountName());
- }
-
- protected Configuration getConfiguration() {
- return this.configuration;
- }
-
- protected boolean isEmulator() {
- return isEmulator;
- }
-
- protected static String wasbUrlToAbfsUrl(final String wasbUrl) {
- return convertTestUrls(
- wasbUrl, FileSystemUriSchemes.WASB_SCHEME, FileSystemUriSchemes.WASB_SECURE_SCHEME, FileSystemUriSchemes.WASB_DNS_PREFIX,
- FileSystemUriSchemes.ABFS_SCHEME, FileSystemUriSchemes.ABFS_SECURE_SCHEME, FileSystemUriSchemes.ABFS_DNS_PREFIX);
- }
-
- protected static String abfsUrlToWasbUrl(final String abfsUrl) {
- return convertTestUrls(
- abfsUrl, FileSystemUriSchemes.ABFS_SCHEME, FileSystemUriSchemes.ABFS_SECURE_SCHEME, FileSystemUriSchemes.ABFS_DNS_PREFIX,
- FileSystemUriSchemes.WASB_SCHEME, FileSystemUriSchemes.WASB_SECURE_SCHEME, FileSystemUriSchemes.WASB_DNS_PREFIX);
- }
-
- private static String convertTestUrls(
- final String url, final String fromNonSecureScheme, final String fromSecureScheme, final String fromDnsPrefix,
- final String toNonSecureScheme, final String toSecureScheme, final String toDnsPrefix) {
- String data = null;
- if (url.startsWith(fromNonSecureScheme + "://")) {
- data = url.replace(fromNonSecureScheme + "://", toNonSecureScheme + "://");
- } else if (url.startsWith(fromSecureScheme + "://")) {
- data = url.replace(fromSecureScheme + "://", toSecureScheme + "://");
- }
-
- data = data.replace("." + fromDnsPrefix + ".", "." + toDnsPrefix + ".");
- return data;
- }
-}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java
index 10d42d1399d..f2e26ec19f2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java
@@ -25,13 +25,13 @@ import org.junit.Test;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
-
-import static org.junit.Assert.assertEquals;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
/**
* Test append operations.
*/
-public class ITestAzureBlobFileSystemAppend extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemAppend extends
+ AbstractAbfsIntegrationTest {
private static final Path TEST_FILE_PATH = new Path("testfile");
private static final Path TEST_FOLDER_PATH = new Path("testFolder");
public ITestAzureBlobFileSystemAppend() {
@@ -40,7 +40,7 @@ public class ITestAzureBlobFileSystemAppend extends DependencyInjectedTest {
@Test(expected = FileNotFoundException.class)
public void testAppendDirShouldFail() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
final Path filePath = TEST_FILE_PATH;
fs.mkdirs(filePath);
fs.append(filePath, 0);
@@ -48,21 +48,21 @@ public class ITestAzureBlobFileSystemAppend extends DependencyInjectedTest {
@Test
public void testAppendWithLength0() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- FSDataOutputStream stream = fs.create(TEST_FILE_PATH);
- final byte[] b = new byte[1024];
- new Random().nextBytes(b);
- stream.write(b, 1000, 0);
-
- assertEquals(0, stream.getPos());
+ final AzureBlobFileSystem fs = getFileSystem();
+ try(FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) {
+ final byte[] b = new byte[1024];
+ new Random().nextBytes(b);
+ stream.write(b, 1000, 0);
+ assertEquals(0, stream.getPos());
+ }
}
@Test(expected = FileNotFoundException.class)
public void testAppendFileAfterDelete() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
final Path filePath = TEST_FILE_PATH;
- fs.create(filePath);
+ ContractTestUtils.touch(fs, filePath);
fs.delete(filePath, false);
fs.append(filePath);
@@ -70,7 +70,7 @@ public class ITestAzureBlobFileSystemAppend extends DependencyInjectedTest {
@Test(expected = FileNotFoundException.class)
public void testAppendDirectory() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
final Path folderPath = TEST_FOLDER_PATH;
fs.mkdirs(folderPath);
fs.append(folderPath);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
index d107c9d008b..d6964814cc6 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java
@@ -27,13 +27,11 @@ import org.junit.Test;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
/**
* Test AzureBlobFileSystem back compatibility with WASB.
*/
-public class ITestAzureBlobFileSystemBackCompat extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemBackCompat extends
+ AbstractAbfsIntegrationTest {
public ITestAzureBlobFileSystemBackCompat() {
super();
}
@@ -54,13 +52,13 @@ public class ITestAzureBlobFileSystemBackCompat extends DependencyInjectedTest {
blockBlob.uploadText("");
FileStatus[] fileStatuses = fs.listStatus(new Path("/test/10/"));
- assertEquals(fileStatuses.length, 2);
- assertEquals(fileStatuses[0].getPath().getName(), "10");
+ assertEquals(2, fileStatuses.length);
+ assertEquals("10", fileStatuses[0].getPath().getName());
assertTrue(fileStatuses[0].isDirectory());
- assertEquals(fileStatuses[0].getLen(), 0);
- assertEquals(fileStatuses[1].getPath().getName(), "123");
+ assertEquals(0, fileStatuses[0].getLen());
+ assertEquals("123", fileStatuses[1].getPath().getName());
assertTrue(fileStatuses[1].isDirectory());
- assertEquals(fileStatuses[1].getLen(), 0);
+ assertEquals(0, fileStatuses[1].getLen());
}
private String getBlobConnectionString() {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java
index c158e03e622..90eff97854c 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java
@@ -33,30 +33,29 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
/**
* Test copy operation.
*/
-public class ITestAzureBlobFileSystemCopy extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemCopy extends AbstractAbfsIntegrationTest {
public ITestAzureBlobFileSystemCopy() {
super();
}
@Test
public void testCopyFromLocalFileSystem() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
Path localFilePath = new Path(System.getProperty("test.build.data",
"azure_test"));
- FileSystem localFs = FileSystem.get(new Configuration());
+ FileSystem localFs = FileSystem.getLocal(new Configuration());
localFs.delete(localFilePath, true);
try {
writeString(localFs, localFilePath, "Testing");
Path dstPath = new Path("copiedFromLocal");
assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
fs.getConf()));
- assertTrue(fs.exists(dstPath));
+ assertIsFile(fs, dstPath);
assertEquals("Testing", readString(fs, dstPath));
fs.delete(dstPath, true);
} finally {
@@ -65,36 +64,32 @@ public class ITestAzureBlobFileSystemCopy extends DependencyInjectedTest {
}
private String readString(FileSystem fs, Path testFile) throws IOException {
- FSDataInputStream inputStream = fs.open(testFile);
- String ret = readString(inputStream);
- inputStream.close();
- return ret;
+ return readString(fs.open(testFile));
}
private String readString(FSDataInputStream inputStream) throws IOException {
- BufferedReader reader = new BufferedReader(new InputStreamReader(
- inputStream));
- final int bufferSize = 1024;
- char[] buffer = new char[bufferSize];
- int count = reader.read(buffer, 0, bufferSize);
- if (count > bufferSize) {
- throw new IOException("Exceeded buffer size");
+ try (BufferedReader reader = new BufferedReader(new InputStreamReader(
+ inputStream))) {
+ final int bufferSize = 1024;
+ char[] buffer = new char[bufferSize];
+ int count = reader.read(buffer, 0, bufferSize);
+ if (count > bufferSize) {
+ throw new IOException("Exceeded buffer size");
+ }
+ return new String(buffer, 0, count);
}
- inputStream.close();
- return new String(buffer, 0, count);
}
private void writeString(FileSystem fs, Path path, String value)
throws IOException {
- FSDataOutputStream outputStream = fs.create(path, true);
- writeString(outputStream, value);
+ writeString(fs.create(path, true), value);
}
private void writeString(FSDataOutputStream outputStream, String value)
throws IOException {
- BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
- outputStream));
- writer.write(value);
- writer.close();
+ try(BufferedWriter writer = new BufferedWriter(
+ new OutputStreamWriter(outputStream))) {
+ writer.write(value);
+ }
}
}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java
index c9b99e6d8a2..1e43f9a3601 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java
@@ -24,18 +24,17 @@ import java.util.EnumSet;
import org.junit.Test;
import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
/**
* Test create operation.
*/
-public class ITestAzureBlobFileSystemCreate extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemCreate extends
+ AbstractAbfsIntegrationTest {
private static final Path TEST_FILE_PATH = new Path("testfile");
private static final Path TEST_FOLDER_PATH = new Path("testFolder");
private static final String TEST_CHILD_FILE = "childFile";
@@ -43,68 +42,65 @@ public class ITestAzureBlobFileSystemCreate extends DependencyInjectedTest {
super();
}
- @Test(expected = FileAlreadyExistsException.class)
- public void testCreateFileWithExistingDir() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.mkdirs(TEST_FOLDER_PATH);
- fs.create(TEST_FOLDER_PATH);
- }
-
@Test
- public void testEnsureFileCreated() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(TEST_FILE_PATH);
-
- FileStatus fileStatus = fs.getFileStatus(TEST_FILE_PATH);
- assertNotNull(fileStatus);
+ public void testEnsureFileCreatedImmediately() throws Exception {
+ final AzureBlobFileSystem fs = getFileSystem();
+ FSDataOutputStream out = fs.create(TEST_FILE_PATH);
+ try {
+ assertIsFile(fs, TEST_FILE_PATH);
+ } finally {
+ out.close();
+ }
+ assertIsFile(fs, TEST_FILE_PATH);
}
@Test
@SuppressWarnings("deprecation")
public void testCreateNonRecursive() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
try {
fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null);
- assertTrue("Should've thrown", false);
- } catch (FileNotFoundException e) {
+ fail("Should've thrown");
+ } catch (FileNotFoundException expected) {
}
fs.mkdirs(TEST_FOLDER_PATH);
fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null)
.close();
- assertTrue(fs.exists(testFile));
+ assertIsFile(fs, testFile);
}
@Test
@SuppressWarnings("deprecation")
public void testCreateNonRecursive1() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
try {
fs.createNonRecursive(testFile, FsPermission.getDefault(), EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 1024, (short) 1, 1024, null);
- assertTrue("Should've thrown", false);
- } catch (FileNotFoundException e) {
+ fail("Should've thrown");
+ } catch (FileNotFoundException expected) {
}
fs.mkdirs(TEST_FOLDER_PATH);
fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null)
.close();
- assertTrue(fs.exists(testFile));
+ assertIsFile(fs, testFile);
+
}
@Test
@SuppressWarnings("deprecation")
public void testCreateNonRecursive2() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
Path testFile = new Path(TEST_FOLDER_PATH, TEST_CHILD_FILE);
try {
fs.createNonRecursive(testFile, FsPermission.getDefault(), false, 1024, (short) 1, 1024, null);
- assertTrue("Should've thrown", false);
+ fail("Should've thrown");
} catch (FileNotFoundException e) {
}
fs.mkdirs(TEST_FOLDER_PATH);
fs.createNonRecursive(testFile, true, 1024, (short) 1, 1024, null)
.close();
- assertTrue(fs.exists(testFile));
+ assertIsFile(fs, testFile);
}
}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java
index 372a0876b11..91d1723a752 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java
@@ -28,71 +28,79 @@ import java.util.concurrent.Future;
import org.junit.Test;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
-import static org.junit.Assert.assertEquals;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertDeleted;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Test delete operation.
*/
-public class ITestAzureBlobFileSystemDelete extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemDelete extends
+ AbstractAbfsIntegrationTest {
public ITestAzureBlobFileSystemDelete() {
super();
}
@Test
public void testDeleteRoot() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
fs.mkdirs(new Path("/testFolder0"));
fs.mkdirs(new Path("/testFolder1"));
fs.mkdirs(new Path("/testFolder2"));
- fs.create(new Path("/testFolder1/testfile"));
- fs.create(new Path("/testFolder1/testfile2"));
- fs.create(new Path("/testFolder1/testfile3"));
+ touch(new Path("/testFolder1/testfile"));
+ touch(new Path("/testFolder1/testfile2"));
+ touch(new Path("/testFolder1/testfile3"));
- FileStatus[] ls = fs.listStatus(new Path("/"));
- assertEquals(4, ls.length); // and user dir
+ Path root = new Path("/");
+ FileStatus[] ls = fs.listStatus(root);
+ assertEquals(3, ls.length);
- fs.delete(new Path("/"), true);
- ls = fs.listStatus(new Path("/"));
- assertEquals(0, ls.length);
+ fs.delete(root, true);
+ ls = fs.listStatus(root);
+ assertEquals("listing size", 0, ls.length);
}
- @Test(expected = FileNotFoundException.class)
+ @Test()
public void testOpenFileAfterDelete() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(new Path("/testFile"));
- fs.delete(new Path("/testFile"), false);
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path testfile = new Path("/testFile");
+ touch(testfile);
+ assertDeleted(fs, testfile, false);
- fs.open(new Path("/testFile"));
+ intercept(FileNotFoundException.class,
+ () -> fs.open(testfile));
}
- @Test(expected = FileNotFoundException.class)
+ @Test
public void testEnsureFileIsDeleted() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(new Path("testfile"));
- fs.delete(new Path("testfile"), false);
-
- fs.getFileStatus(new Path("testfile"));
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path testfile = new Path("testfile");
+ touch(testfile);
+ assertDeleted(fs, testfile, false);
+ assertPathDoesNotExist(fs, "deleted", testfile);
}
- @Test(expected = FileNotFoundException.class)
+ @Test
public void testDeleteDirectory() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.mkdirs(new Path("testfile"));
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path dir = new Path("testfile");
+ fs.mkdirs(dir);
fs.mkdirs(new Path("testfile/test1"));
fs.mkdirs(new Path("testfile/test1/test2"));
- fs.delete(new Path("testfile"), true);
- fs.getFileStatus(new Path("testfile"));
+ assertDeleted(fs, dir, true);
+ assertPathDoesNotExist(fs, "deleted", dir);
}
- @Test(expected = FileNotFoundException.class)
+ @Test
public void testDeleteFirstLevelDirectory() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final List<Future<Void>> tasks = new ArrayList<>();
+ final AzureBlobFileSystem fs = getFileSystem();
+ final List<Future<Void>> tasks = new ArrayList<>();
ExecutorService es = Executors.newFixedThreadPool(10);
for (int i = 0; i < 1000; i++) {
@@ -100,7 +108,7 @@ public class ITestAzureBlobFileSystemDelete extends DependencyInjectedTest {
Callable<Void> callable = new Callable<Void>() {
@Override
public Void call() throws Exception {
- fs.create(fileName);
+ touch(fileName);
return null;
}
};
@@ -113,7 +121,12 @@ public class ITestAzureBlobFileSystemDelete extends DependencyInjectedTest {
}
es.shutdownNow();
- fs.delete(new Path("/test"), true);
- fs.getFileStatus(new Path("/test"));
+ Path dir = new Path("/test");
+ // first try a non-recursive delete, expect failure
+ intercept(FileAlreadyExistsException.class,
+ () -> fs.delete(dir, false));
+ assertDeleted(fs, dir, true);
+ assertPathDoesNotExist(fs, "deleted", dir);
+
}
}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java
index ad22f999fe6..057dfa03115 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java
@@ -38,7 +38,7 @@ import static org.junit.Assert.assertArrayEquals;
/**
* Test end to end between ABFS client and ABFS server.
*/
-public class ITestAzureBlobFileSystemE2E extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemE2E extends AbstractAbfsIntegrationTest {
private static final Path TEST_FILE = new Path("testfile");
private static final int TEST_BYTE = 100;
private static final int TEST_OFFSET = 100;
@@ -53,11 +53,11 @@ public class ITestAzureBlobFileSystemE2E extends DependencyInjectedTest {
@Test
public void testWriteOneByteToFile() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- FSDataOutputStream stream = fs.create(TEST_FILE);
+ final AzureBlobFileSystem fs = getFileSystem();
- stream.write(TEST_BYTE);
- stream.close();
+ try(FSDataOutputStream stream = fs.create(TEST_FILE)) {
+ stream.write(TEST_BYTE);
+ }
FileStatus fileStatus = fs.getFileStatus(TEST_FILE);
assertEquals(1, fileStatus.getLen());
@@ -65,52 +65,52 @@ public class ITestAzureBlobFileSystemE2E extends DependencyInjectedTest {
@Test
public void testReadWriteBytesToFile() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
testWriteOneByteToFile();
- FSDataInputStream inputStream = fs.open(TEST_FILE, TEST_DEFAULT_BUFFER_SIZE);
- int i = inputStream.read();
- inputStream.close();
-
- assertEquals(TEST_BYTE, i);
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE,
+ TEST_DEFAULT_BUFFER_SIZE)) {
+ assertEquals(TEST_BYTE, inputStream.read());
+ }
}
@Test (expected = IOException.class)
public void testOOBWrites() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
int readBufferSize = fs.getAbfsStore().getAbfsConfiguration().getReadBufferSize();
- fs.create(TEST_FILE);
- FSDataOutputStream writeStream = fs.create(TEST_FILE);
-
byte[] bytesToRead = new byte[readBufferSize];
final byte[] b = new byte[2 * readBufferSize];
new Random().nextBytes(b);
- writeStream.write(b);
- writeStream.flush();
- writeStream.close();
- FSDataInputStream readStream = fs.open(TEST_FILE);
- readStream.read(bytesToRead, 0, readBufferSize);
+ try(FSDataOutputStream writeStream = fs.create(TEST_FILE)) {
+ writeStream.write(b);
+ writeStream.flush();
+ }
- writeStream = fs.create(TEST_FILE);
- writeStream.write(b);
- writeStream.flush();
- writeStream.close();
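+ // Reopen the file for reading, then overwrite it while the read
+ // stream is still open; the test expects the out-of-band change
+ // to surface as an IOException.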
+ try (FSDataInputStream readStream = fs.open(TEST_FILE)) {
+ assertEquals(readBufferSize,
+ readStream.read(bytesToRead, 0, readBufferSize));
- readStream.read(bytesToRead, 0, readBufferSize);
- readStream.close();
+ try (FSDataOutputStream writeStream = fs.create(TEST_FILE)) {
+ writeStream.write(b);
+ writeStream.flush();
+ }
+
+ assertEquals(readBufferSize,
+ readStream.read(bytesToRead, 0, readBufferSize));
+ }
}
@Test
public void testWriteWithBufferOffset() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE);
+ final AzureBlobFileSystem fs = getFileSystem();
final byte[] b = new byte[1024 * 1000];
new Random().nextBytes(b);
- stream.write(b, TEST_OFFSET, b.length - TEST_OFFSET);
- stream.close();
+ try(final FSDataOutputStream stream = fs.create(TEST_FILE)) {
+ stream.write(b, TEST_OFFSET, b.length - TEST_OFFSET);
+ }
final byte[] r = new byte[TEST_DEFAULT_READ_BUFFER_SIZE];
FSDataInputStream inputStream = fs.open(TEST_FILE, TEST_DEFAULT_BUFFER_SIZE);
@@ -124,13 +124,11 @@ public class ITestAzureBlobFileSystemE2E extends DependencyInjectedTest {
@Test
public void testReadWriteHeavyBytesToFileWithSmallerChunks() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE);
+ final AzureBlobFileSystem fs = getFileSystem();
final byte[] writeBuffer = new byte[5 * 1000 * 1024];
new Random().nextBytes(writeBuffer);
- stream.write(writeBuffer);
- stream.close();
+ write(TEST_FILE, writeBuffer);
final byte[] readBuffer = new byte[5 * 1000 * 1024];
FSDataInputStream inputStream = fs.open(TEST_FILE, TEST_DEFAULT_BUFFER_SIZE);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java
index 616253bca35..04690de2403 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java
@@ -26,7 +26,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
-import org.junit.Assert;
import org.junit.Test;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -35,28 +34,24 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-
/**
* Test end to end between ABFS client and ABFS server with heavy traffic.
*/
-public class ITestAzureBlobFileSystemE2EScale extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemE2EScale extends
+ AbstractAbfsScaleTest {
private static final int TEN = 10;
private static final int ONE_THOUSAND = 1000;
private static final int BASE_SIZE = 1024;
private static final int ONE_MB = 1024 * 1024;
private static final int DEFAULT_WRITE_TIMES = 100;
- private static final Path TEST_FILE = new Path("testfile");
+ private static final Path TEST_FILE = new Path("ITestAzureBlobFileSystemE2EScale");
public ITestAzureBlobFileSystemE2EScale() {
- super();
}
@Test
- public void testWriteHeavyBytesToFile() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ public void testWriteHeavyBytesToFileAcrossThreads() throws Exception {
+ final AzureBlobFileSystem fs = getFileSystem();
final FSDataOutputStream stream = fs.create(TEST_FILE);
ExecutorService es = Executors.newFixedThreadPool(TEN);
@@ -65,7 +60,8 @@ public class ITestAzureBlobFileSystemE2EScale extends DependencyInjectedTest {
new Random().nextBytes(b);
List<Future<Void>> tasks = new ArrayList<>();
- for (int i = 0; i < DEFAULT_WRITE_TIMES; i++) {
+ int operationCount = DEFAULT_WRITE_TIMES;
+ for (int i = 0; i < operationCount; i++) {
Callable<Void> callable = new Callable<Void>() {
@Override
public Void call() throws Exception {
@@ -86,48 +82,38 @@ public class ITestAzureBlobFileSystemE2EScale extends DependencyInjectedTest {
es.shutdownNow();
FileStatus fileStatus = fs.getFileStatus(TEST_FILE);
- assertEquals(testWriteBufferSize * DEFAULT_WRITE_TIMES, fileStatus.getLen());
- }
-
- @Test
- public void testReadWriteHeavyBytesToFile() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE);
-
- int testBufferSize = 5 * TEN * ONE_THOUSAND * BASE_SIZE;
- final byte[] b = new byte[testBufferSize];
- new Random().nextBytes(b);
- stream.write(b);
- stream.close();
-
- final byte[] r = new byte[testBufferSize];
- FSDataInputStream inputStream = fs.open(TEST_FILE, 4 * ONE_MB);
- int result = inputStream.read(r);
- inputStream.close();
-
- assertNotEquals(-1, result);
- assertArrayEquals(r, b);
+ assertEquals(testWriteBufferSize * operationCount, fileStatus.getLen());
}
@Test
public void testReadWriteHeavyBytesToFileWithStatistics() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE);
- final FileSystem.Statistics abfsStatistics = fs.getFsStatistics();
- abfsStatistics.reset();
+ final AzureBlobFileSystem fs = getFileSystem();
+ final FileSystem.Statistics abfsStatistics;
+ int testBufferSize;
+ final byte[] sourceData;
+ try(final FSDataOutputStream stream = fs.create(TEST_FILE)) {
+ abfsStatistics = fs.getFsStatistics();
+ abfsStatistics.reset();
- int testBufferSize = 5 * TEN * ONE_THOUSAND * BASE_SIZE;
- final byte[] b = new byte[testBufferSize];
- new Random().nextBytes(b);
- stream.write(b);
- stream.close();
+ testBufferSize = 5 * TEN * ONE_THOUSAND * BASE_SIZE;
+ sourceData = new byte[testBufferSize];
+ new Random().nextBytes(sourceData);
+ stream.write(sourceData);
+ }
- final byte[] r = new byte[testBufferSize];
- FSDataInputStream inputStream = fs.open(TEST_FILE, 4 * ONE_MB);
- inputStream.read(r);
- inputStream.close();
+ final byte[] remoteData = new byte[testBufferSize];
+ int bytesRead;
+ try (FSDataInputStream inputStream = fs.open(TEST_FILE, 4 * ONE_MB)) {
+ bytesRead = inputStream.read(remoteData);
+ }
+
+ String stats = abfsStatistics.toString();
+ assertEquals("Bytes read in " + stats,
+ remoteData.length, abfsStatistics.getBytesRead());
+ assertEquals("bytes written in " + stats,
+ sourceData.length, abfsStatistics.getBytesWritten());
+ assertEquals("bytesRead from read() call", testBufferSize, bytesRead );
+ assertArrayEquals("round tripped data", sourceData, remoteData);
- Assert.assertEquals(r.length, abfsStatistics.getBytesRead());
- Assert.assertEquals(b.length, abfsStatistics.getBytesWritten());
}
}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
index bfa662d455b..791694bf0f1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.fs.azurebfs;
+import java.io.IOException;
+
import org.junit.Test;
import org.apache.hadoop.fs.FileStatus;
@@ -25,12 +27,11 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
-import static org.junit.Assert.assertEquals;
-
/**
* Test FileStatus.
*/
-public class ITestAzureBlobFileSystemFileStatus extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemFileStatus extends
+ AbstractAbfsIntegrationTest {
private static final Path TEST_FILE = new Path("testFile");
private static final Path TEST_FOLDER = new Path("testDir");
public ITestAzureBlobFileSystemFileStatus() {
@@ -41,24 +42,38 @@ public class ITestAzureBlobFileSystemFileStatus extends DependencyInjectedTest {
public void testEnsureStatusWorksForRoot() throws Exception {
final AzureBlobFileSystem fs = this.getFileSystem();
- fs.getFileStatus(new Path("/"));
- fs.listStatus(new Path("/"));
+ Path root = new Path("/");
+ FileStatus[] rootls = fs.listStatus(root);
+ assertEquals("root listing", 0, rootls.length);
}
@Test
public void testFileStatusPermissionsAndOwnerAndGroup() throws Exception {
final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(TEST_FILE);
+ touch(TEST_FILE);
+ validateStatus(fs, TEST_FILE);
+ }
+
+ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name)
+ throws IOException {
+ FileStatus fileStatus = fs.getFileStatus(name);
+ String errorInStatus = "error in " + fileStatus + " from " + fs;
+ assertEquals(errorInStatus + ": permission",
+ new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL),
+ fileStatus.getPermission());
+ assertEquals(errorInStatus + ": owner",
+ fs.getOwnerUser(), fileStatus.getOwner());
+ assertEquals(errorInStatus + ": group",
+ fs.getOwnerUserPrimaryGroup(), fileStatus.getGroup());
+ return fileStatus;
+ }
+
+ @Test
+ public void testFolderStatusPermissionsAndOwnerAndGroup() throws Exception {
+ final AzureBlobFileSystem fs = this.getFileSystem();
fs.mkdirs(TEST_FOLDER);
- FileStatus fileStatus = fs.getFileStatus(TEST_FILE);
- assertEquals(new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL), fileStatus.getPermission());
- assertEquals(fs.getOwnerUser(), fileStatus.getGroup());
- assertEquals(fs.getOwnerUserPrimaryGroup(), fileStatus.getOwner());
-
- fileStatus = fs.getFileStatus(TEST_FOLDER);
- assertEquals(new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL), fileStatus.getPermission());
- assertEquals(fs.getOwnerUser(), fileStatus.getGroup());
- assertEquals(fs.getOwnerUserPrimaryGroup(), fileStatus.getOwner());
+ validateStatus(fs, TEST_FOLDER);
}
+
}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java
index 8c2e8ce32dd..d90f0186da1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java
@@ -34,14 +34,10 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-
/**
* Test flush operation.
*/
-public class ITestAzureBlobFileSystemFlush extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemFlush extends AbstractAbfsScaleTest {
private static final int BASE_SIZE = 1024;
private static final int ONE_THOUSAND = 1000;
private static final int TEST_BUFFER_SIZE = 5 * ONE_THOUSAND * BASE_SIZE;
@@ -56,146 +52,145 @@ public class ITestAzureBlobFileSystemFlush extends DependencyInjectedTest {
}
@Test
- public void testAbfsOutputStreamAsyncFlushWithRetainUncommitedData() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE_PATH);
+ public void testAbfsOutputStreamAsyncFlushWithRetainUncommittedData() throws Exception {
+ final AzureBlobFileSystem fs = getFileSystem();
+ final byte[] b;
+ try(final FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) {
+ b = new byte[TEST_BUFFER_SIZE];
+ new Random().nextBytes(b);
- final byte[] b = new byte[TEST_BUFFER_SIZE];
- new Random().nextBytes(b);
+ for (int i = 0; i < 2; i++) {
+ stream.write(b);
- for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < FLUSH_TIMES; j++) {
+ stream.flush();
+ Thread.sleep(10);
+ }
+ }
+ }
+
+ final byte[] r = new byte[TEST_BUFFER_SIZE];
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH, 4 * ONE_MB)) {
+ while (inputStream.available() != 0) {
+ int result = inputStream.read(r);
+
+ assertNotEquals("read returned -1", -1, result);
+ assertArrayEquals("buffer read from stream", r, b);
+ }
+ }
+ }
+
+ @Test
+ public void testAbfsOutputStreamSyncFlush() throws Exception {
+ final AzureBlobFileSystem fs = getFileSystem();
+ final byte[] b;
+ try(final FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) {
+ b = new byte[TEST_BUFFER_SIZE];
+ new Random().nextBytes(b);
stream.write(b);
- for (int j = 0; j < FLUSH_TIMES; j++) {
- stream.flush();
+ for (int i = 0; i < FLUSH_TIMES; i++) {
+ stream.hsync();
+ stream.hflush();
Thread.sleep(10);
}
}
- stream.close();
-
final byte[] r = new byte[TEST_BUFFER_SIZE];
- FSDataInputStream inputStream = fs.open(TEST_FILE_PATH, 4 * ONE_MB);
-
- while (inputStream.available() != 0) {
+ try(FSDataInputStream inputStream = fs.open(TEST_FILE_PATH, 4 * ONE_MB)) {
int result = inputStream.read(r);
assertNotEquals(-1, result);
assertArrayEquals(r, b);
}
-
- inputStream.close();
- }
-
- @Test
- public void testAbfsOutputStreamSyncFlush() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE_PATH);
-
- final byte[] b = new byte[TEST_BUFFER_SIZE];
- new Random().nextBytes(b);
- stream.write(b);
-
- for (int i = 0; i < FLUSH_TIMES; i++) {
- stream.hsync();
- stream.hflush();
- Thread.sleep(10);
- }
- stream.close();
-
- final byte[] r = new byte[TEST_BUFFER_SIZE];
- FSDataInputStream inputStream = fs.open(TEST_FILE_PATH, 4 * ONE_MB);
- int result = inputStream.read(r);
-
- assertNotEquals(-1, result);
- assertArrayEquals(r, b);
-
- inputStream.close();
}
@Test
public void testWriteHeavyBytesToFileSyncFlush() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final FSDataOutputStream stream = fs.create(TEST_FILE_PATH);
- final FileSystem.Statistics abfsStatistics = fs.getFsStatistics();
- abfsStatistics.reset();
+ final AzureBlobFileSystem fs = getFileSystem();
+ final FileSystem.Statistics abfsStatistics;
+ ExecutorService es;
+ try(final FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) {
+ abfsStatistics = fs.getFsStatistics();
+ abfsStatistics.reset();
- ExecutorService es = Executors.newFixedThreadPool(10);
+ es = Executors.newFixedThreadPool(10);
- final byte[] b = new byte[TEST_BUFFER_SIZE];
- new Random().nextBytes(b);
+ final byte[] b = new byte[TEST_BUFFER_SIZE];
+ new Random().nextBytes(b);
- List<Future<Void>> tasks = new ArrayList<>();
- for (int i = 0; i < FLUSH_TIMES; i++) {
- Callable<Void> callable = new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- stream.write(b);
- return null;
- }
- };
+ List<Future<Void>> tasks = new ArrayList<>();
+ for (int i = 0; i < FLUSH_TIMES; i++) {
+ Callable<Void> callable = new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ stream.write(b);
+ return null;
+ }
+ };
- tasks.add(es.submit(callable));
- }
+ tasks.add(es.submit(callable));
+ }
- boolean shouldStop = false;
- while (!shouldStop) {
- shouldStop = true;
- for (Future<Void> task : tasks) {
- if (!task.isDone()) {
- stream.hsync();
- shouldStop = false;
- Thread.sleep(THREAD_SLEEP_TIME);
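+ // poll the writer tasks, issuing hsync() until every write has completed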
+ boolean shouldStop = false;
+ while (!shouldStop) {
+ shouldStop = true;
+ for (Future<Void> task : tasks) {
+ if (!task.isDone()) {
+ stream.hsync();
+ shouldStop = false;
+ Thread.sleep(THREAD_SLEEP_TIME);
+ }
}
}
- }
- tasks.clear();
- stream.close();
+ tasks.clear();
+ }
es.shutdownNow();
FileStatus fileStatus = fs.getFileStatus(TEST_FILE_PATH);
- assertEquals((long) TEST_BUFFER_SIZE * FLUSH_TIMES, fileStatus.getLen());
- assertEquals((long) TEST_BUFFER_SIZE * FLUSH_TIMES, abfsStatistics.getBytesWritten());
+ long expectedWrites = (long) TEST_BUFFER_SIZE * FLUSH_TIMES;
+ assertEquals("Wrong file length in " + fileStatus, expectedWrites, fileStatus.getLen());
+ assertEquals("wrong bytes Written count in " + abfsStatistics,
+ expectedWrites, abfsStatistics.getBytesWritten());
}
@Test
public void testWriteHeavyBytesToFileAsyncFlush() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(TEST_FILE_PATH);
- final FSDataOutputStream stream = fs.create(TEST_FILE_PATH);
+ final AzureBlobFileSystem fs = getFileSystem();
ExecutorService es = Executors.newFixedThreadPool(10);
+ try(final FSDataOutputStream stream = fs.create(TEST_FILE_PATH)) {
- final byte[] b = new byte[TEST_BUFFER_SIZE];
- new Random().nextBytes(b);
+ final byte[] b = new byte[TEST_BUFFER_SIZE];
+ new Random().nextBytes(b);
- List<Future<Void>> tasks = new ArrayList<>();
- for (int i = 0; i < FLUSH_TIMES; i++) {
- Callable<Void> callable = new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- stream.write(b);
- return null;
- }
- };
+ List<Future<Void>> tasks = new ArrayList<>();
+ for (int i = 0; i < FLUSH_TIMES; i++) {
+ Callable<Void> callable = new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ stream.write(b);
+ return null;
+ }
+ };
- tasks.add(es.submit(callable));
- }
+ tasks.add(es.submit(callable));
+ }
- boolean shouldStop = false;
- while (!shouldStop) {
- shouldStop = true;
- for (Future<Void> task : tasks) {
- if (!task.isDone()) {
- stream.flush();
- shouldStop = false;
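+ // keep flushing until every queued write task has completed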
+ boolean shouldStop = false;
+ while (!shouldStop) {
+ shouldStop = true;
+ for (Future<Void> task : tasks) {
+ if (!task.isDone()) {
+ stream.flush();
+ shouldStop = false;
+ }
}
}
+ Thread.sleep(THREAD_SLEEP_TIME);
+ tasks.clear();
}
- Thread.sleep(THREAD_SLEEP_TIME);
- tasks.clear();
- stream.close();
es.shutdownNow();
FileStatus fileStatus = fs.getFileStatus(TEST_FILE_PATH);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
index d2ed4008aa2..5a6e46db016 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
@@ -22,29 +22,32 @@ import java.io.FileNotFoundException;
import org.junit.Test;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
/**
* Test filesystem initialization and creation.
*/
-public class ITestAzureBlobFileSystemInitAndCreate extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemInitAndCreate extends
+ AbstractAbfsIntegrationTest {
public ITestAzureBlobFileSystemInitAndCreate() {
- super();
this.getConfiguration().unset(ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION);
}
@Override
- public void initialize() {
+ public void setup() {
}
@Override
- public void testCleanup() {
+ public void teardown() {
}
@Test (expected = FileNotFoundException.class)
public void ensureFilesystemWillNotBeCreatedIfCreationConfigIsNotSet() throws Exception {
- super.initialize();
- this.getFileSystem();
+ super.setup();
+ final AzureBlobFileSystem fs = this.getFileSystem();
+ FileStatus[] fileStatuses = fs.listStatus(new Path("/"));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java
index 6059766c2ad..b87abe68355 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java
@@ -26,20 +26,21 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
-import org.junit.Assert;
import org.junit.Test;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Test listStatus operation.
*/
-public class ITestAzureBlobFileSystemListStatus extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemListStatus extends
+ AbstractAbfsIntegrationTest {
private static final int TEST_FILES_NUMBER = 6000;
public ITestAzureBlobFileSystemListStatus() {
super();
@@ -47,8 +48,8 @@ public class ITestAzureBlobFileSystemListStatus extends DependencyInjectedTest {
@Test
public void testListPath() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- final List<Future> tasks = new ArrayList<>();
+ final AzureBlobFileSystem fs = getFileSystem();
+ final List<Future<Void>> tasks = new ArrayList<>();
ExecutorService es = Executors.newFixedThreadPool(10);
for (int i = 0; i < TEST_FILES_NUMBER; i++) {
@@ -56,7 +57,7 @@ public class ITestAzureBlobFileSystemListStatus extends DependencyInjectedTest {
Callable<Void> callable = new Callable<Void>() {
@Override
public Void call() throws Exception {
- fs.create(fileName);
+ touch(fileName);
return null;
}
};
@@ -70,63 +71,101 @@ public class ITestAzureBlobFileSystemListStatus extends DependencyInjectedTest {
es.shutdownNow();
FileStatus[] files = fs.listStatus(new Path("/"));
- Assert.assertEquals(files.length, TEST_FILES_NUMBER + 1 /* user directory */);
+ assertEquals(TEST_FILES_NUMBER, files.length /* user directory */);
}
+ /**
+ * Creates a file, verifies that listStatus returns it,
+ * even while the file is still open for writing.
+ */
@Test
public void testListFileVsListDir() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(new Path("/testFile"));
-
- FileStatus[] testFiles = fs.listStatus(new Path("/testFile"));
- Assert.assertEquals(testFiles.length, 1);
- Assert.assertFalse(testFiles[0].isDirectory());
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path path = new Path("/testFile");
+ try(FSDataOutputStream ignored = fs.create(path)) {
+ FileStatus[] testFiles = fs.listStatus(path);
+ assertEquals("length of test files", 1, testFiles.length);
+ FileStatus status = testFiles[0];
+ assertIsFileReference(status);
+ }
}
@Test
public void testListFileVsListDir2() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
fs.mkdirs(new Path("/testFolder"));
fs.mkdirs(new Path("/testFolder/testFolder2"));
fs.mkdirs(new Path("/testFolder/testFolder2/testFolder3"));
- fs.create(new Path("/testFolder/testFolder2/testFolder3/testFile"));
+ Path testFile0Path = new Path("/testFolder/testFolder2/testFolder3/testFile");
+ ContractTestUtils.touch(fs, testFile0Path);
- FileStatus[] testFiles = fs.listStatus(new Path("/testFolder/testFolder2/testFolder3/testFile"));
- Assert.assertEquals(testFiles.length, 1);
- Assert.assertEquals(testFiles[0].getPath(), new Path(this.getTestUrl(),
- "/testFolder/testFolder2/testFolder3/testFile"));
- Assert.assertFalse(testFiles[0].isDirectory());
+ FileStatus[] testFiles = fs.listStatus(testFile0Path);
+ assertEquals("Wrong listing size of file " + testFile0Path,
+ 1, testFiles.length);
+ FileStatus file0 = testFiles[0];
+ assertEquals("Wrong path for " + file0,
+ new Path(getTestUrl(), "/testFolder/testFolder2/testFolder3/testFile"),
+ file0.getPath());
+ assertIsFileReference(file0);
}
@Test(expected = FileNotFoundException.class)
public void testListNonExistentDir() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
+ final AzureBlobFileSystem fs = getFileSystem();
fs.listStatus(new Path("/testFile/"));
}
@Test
public void testListFiles() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.mkdirs(new Path("/test"));
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path testDir = new Path("/test");
+ fs.mkdirs(testDir);
FileStatus[] fileStatuses = fs.listStatus(new Path("/"));
- assertEquals(fileStatuses.length, 2);
+ assertEquals(1, fileStatuses.length);
fs.mkdirs(new Path("/test/sub"));
- fileStatuses = fs.listStatus(new Path("/test"));
- assertEquals(fileStatuses.length, 1);
- assertEquals(fileStatuses[0].getPath().getName(), "sub");
- assertTrue(fileStatuses[0].isDirectory());
- assertEquals(fileStatuses[0].getLen(), 0);
+ fileStatuses = fs.listStatus(testDir);
+ assertEquals(1, fileStatuses.length);
+ assertEquals("sub", fileStatuses[0].getPath().getName());
+ assertIsDirectoryReference(fileStatuses[0]);
+ Path childF = fs.makeQualified(new Path("/test/f"));
+ touch(childF);
+ fileStatuses = fs.listStatus(testDir);
+ assertEquals(2, fileStatuses.length);
+ final FileStatus childStatus = fileStatuses[0];
+ assertEquals(childF, childStatus.getPath());
+ assertEquals("f", childStatus.getPath().getName());
+ assertIsFileReference(childStatus);
+ assertEquals(0, childStatus.getLen());
+ final FileStatus status1 = fileStatuses[1];
+ assertEquals("sub", status1.getPath().getName());
+ assertIsDirectoryReference(status1);
+ // look at the child through getFileStatus
+ LocatedFileStatus locatedChildStatus = fs.listFiles(childF, false).next();
+ assertIsFileReference(locatedChildStatus);
- fs.create(new Path("/test/f"));
- fileStatuses = fs.listStatus(new Path("/test"));
- assertEquals(fileStatuses.length, 2);
- assertEquals(fileStatuses[0].getPath().getName(), "f");
- assertFalse(fileStatuses[0].isDirectory());
- assertEquals(fileStatuses[0].getLen(), 0);
- assertEquals(fileStatuses[1].getPath().getName(), "sub");
- assertTrue(fileStatuses[1].isDirectory());
- assertEquals(fileStatuses[1].getLen(), 0);
+ fs.delete(testDir, true);
+ intercept(FileNotFoundException.class,
+ () -> fs.listFiles(childF, false).next());
+
+ // do some final checks on the status (failing due to version checks)
+ assertEquals("Path mismatch of " + locatedChildStatus,
+ childF, locatedChildStatus.getPath());
+ assertEquals("locatedstatus.equals(status)",
+ locatedChildStatus, childStatus);
+ assertEquals("status.equals(locatedstatus)",
+ childStatus, locatedChildStatus);
+ }
+
+ private void assertIsDirectoryReference(FileStatus status) {
+ assertTrue("Not a directory: " + status, status.isDirectory());
+ assertFalse("Not a directory: " + status, status.isFile());
+ assertEquals(0, status.getLen());
+ }
+
+ private void assertIsFileReference(FileStatus status) {
+ assertFalse("Not a file: " + status, status.isDirectory());
+ assertTrue("Not a file: " + status, status.isFile());
}
}
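Aside on the intercept idiom adopted above: LambdaTestUtils.intercept evaluates a closure and fails unless the named exception is thrown, returning the caught exception for further checks. A minimal sketch against the local filesystem — the class name and path below are illustrative:

    import java.io.FileNotFoundException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import static org.apache.hadoop.test.LambdaTestUtils.intercept;

    public class InterceptSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        // intercept() evaluates the closure and fails unless the named
        // exception is raised; the exception is returned so further
        // assertions can be made on it.
        FileNotFoundException ex = intercept(FileNotFoundException.class,
            () -> fs.listFiles(new Path("/no/such/file"), false).next());
        System.out.println("caught as expected: " + ex);
      }
    }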
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java
index b61908c13ea..1bb2c54b376 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java
@@ -18,71 +18,30 @@
package org.apache.hadoop.fs.azurebfs;
-import java.util.concurrent.Callable;
-
import org.junit.Test;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.Path;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs;
/**
* Test mkdir operation.
*/
-public class ITestAzureBlobFileSystemMkDir extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemMkDir extends AbstractAbfsIntegrationTest {
public ITestAzureBlobFileSystemMkDir() {
super();
}
@Test
public void testCreateDirWithExistingDir() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- assertTrue(fs.mkdirs(new Path("testFolder")));
- assertTrue(fs.mkdirs(new Path("testFolder")));
- }
-
- @Test(expected = FileAlreadyExistsException.class)
- public void createDirectoryUnderFile() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(new Path("testFile"));
- fs.mkdirs(new Path("testFile/TestDirectory"));
- }
-
- @Test
- public void testCreateDirectoryOverExistingFiles() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(new Path("/testPath"));
- FileAlreadyExistsException ex = intercept(
- FileAlreadyExistsException.class,
- new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- fs.mkdirs(new Path("/testPath"));
- return null;
- }
- });
-
- assertTrue(ex instanceof FileAlreadyExistsException);
-
- fs.create(new Path("/testPath1/file1"));
- ex = intercept(
- FileAlreadyExistsException.class,
- new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- fs.mkdirs(new Path("/testPath1/file1"));
- return null;
- }
- });
-
- assertTrue(ex instanceof FileAlreadyExistsException);
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path path = new Path("testFolder");
+ assertMkdirs(fs, path);
+ assertMkdirs(fs, path);
}
@Test
public void testCreateRoot() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- assertTrue(fs.mkdirs(new Path("/")));
+ assertMkdirs(getFileSystem(), new Path("/"));
}
}
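assertMkdirs turns the boolean result of mkdirs into an assertion with a useful failure message. Roughly — a sketch of the behaviour, not the exact Hadoop source:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import static org.junit.Assert.assertTrue;

    // Approximate behaviour of ContractTestUtils.assertMkdirs: fail with
    // a message naming the directory when mkdirs() returns false, rather
    // than asserting a bare boolean.
    public class AssertMkdirsSketch {
      public static void assertMkdirs(FileSystem fs, Path dir)
          throws IOException {
        assertTrue("mkdirs(" + dir + ") returned false", fs.mkdirs(dir));
      }
    }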
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOpen.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOpen.java
deleted file mode 100644
index fef7f47f720..00000000000
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOpen.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.azurebfs;
-
-import java.io.FileNotFoundException;
-
-import org.junit.Test;
-
-import org.apache.hadoop.fs.Path;
-
-/**
- * Test open operation.
- */
-public class ITestAzureBlobFileSystemOpen extends DependencyInjectedTest {
- public ITestAzureBlobFileSystemOpen() throws Exception {
- super();
- }
-
- @Test(expected = FileNotFoundException.class)
- public void testOpenDirectory() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.mkdirs(new Path("testFolder"));
- fs.open(new Path("testFolder"));
- }
-}
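The directory-open coverage removed here can be written with the same intercept idiom this patch adopts elsewhere; a hedged sketch, with class, method, and path names that are illustrative rather than the patch's actual replacement:

    import java.io.FileNotFoundException;

    import org.junit.Test;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import static org.apache.hadoop.test.LambdaTestUtils.intercept;

    // Illustrative rewrite of the deleted open-a-directory check.
    public class OpenDirectorySketch extends AbstractAbfsIntegrationTest {
      @Test
      public void testOpenDirectoryFails() throws Exception {
        final FileSystem fs = getFileSystem();
        Path dir = new Path("testFolder");
        fs.mkdirs(dir);
        // Opening a directory is expected to raise FileNotFoundException.
        intercept(FileNotFoundException.class, () -> fs.open(dir));
      }
    }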
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java
index 8b96c69c8fc..c61de6764ea 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java
@@ -18,32 +18,31 @@
package org.apache.hadoop.fs.azurebfs;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Test;
-
import java.io.EOFException;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.Callable;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertArrayEquals;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
* Test random read operation.
*/
-public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemRandomRead extends
+ AbstractAbfsScaleTest {
private static final int KILOBYTE = 1024;
private static final int MEGABYTE = KILOBYTE * KILOBYTE;
private static final long TEST_FILE_SIZE = 8 * MEGABYTE;
@@ -62,6 +61,9 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
private static final String ABFS = "ABFS";
private static long testFileLength = 0;
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ITestAzureBlobFileSystemRandomRead.class);
+
public ITestAzureBlobFileSystemRandomRead() throws Exception {
super();
}
@@ -76,7 +78,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
// forward seek and read a kilobyte into first kilobyte of bufferV2
inputStream.seek(5 * MEGABYTE);
int numBytesRead = inputStream.read(buffer, 0, KILOBYTE);
- assertEquals(KILOBYTE, numBytesRead);
+ assertEquals("Wrong number of bytes read", KILOBYTE, numBytesRead);
int len = MEGABYTE;
int offset = buffer.length - len;
@@ -84,7 +86,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
// reverse seek and read a megabyte into last megabyte of bufferV1
inputStream.seek(3 * MEGABYTE);
numBytesRead = inputStream.read(buffer, offset, len);
- assertEquals(len, numBytesRead);
+ assertEquals("Wrong number of bytes read after seek", len, numBytesRead);
}
}
@@ -391,7 +393,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
afterSeekElapsedMs = sequentialRead(ABFS,
this.getFileSystem(), true);
ratio = afterSeekElapsedMs / beforeSeekElapsedMs;
- System.out.println((String.format(
+ LOG.info((String.format(
"beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d, ratio=%3$.2f",
(long) beforeSeekElapsedMs,
(long) afterSeekElapsedMs,
@@ -425,7 +427,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
ratio = v2ElapsedMs / v1ElapsedMs;
- System.out.println(String.format(
+ LOG.info(String.format(
"v1ElapsedMs=%1$d, v2ElapsedMs=%2$d, ratio=%3$.2f",
(long) v1ElapsedMs,
(long) v2ElapsedMs,
@@ -464,7 +466,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
}
long elapsedTimeMs = timer.elapsedTimeMs();
- System.out.println(String.format(
+ LOG.info(String.format(
"v%1$s: bytesRead=%2$d, elapsedMs=%3$d, Mbps=%4$.2f,"
+ " afterReverseSeek=%5$s",
version,
@@ -496,7 +498,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
} while (bytesRead > 0 && totalBytesRead < minBytesToRead);
long elapsedTimeMs = timer.elapsedTimeMs();
inputStream.close();
- System.out.println(String.format(
+ LOG.info(String.format(
"v%1$d: totalBytesRead=%2$d, elapsedTimeMs=%3$d, Mbps=%4$.2f",
version,
totalBytesRead,
@@ -535,7 +537,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
character = (character == 'z') ? 'a' : (char) ((int) character + 1);
}
- System.out.println(String.format("Creating test file %s of size: %d ", TEST_FILE_PATH, TEST_FILE_SIZE));
+ LOG.info(String.format("Creating test file %s of size: %d ", TEST_FILE_PATH, TEST_FILE_SIZE));
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
try (FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) {
@@ -544,7 +546,7 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
outputStream.write(buffer);
bytesWritten += buffer.length;
}
- System.out.println(String.format("Closing stream %s", outputStream));
+ LOG.info("Closing stream {}", outputStream);
ContractTestUtils.NanoTimer closeTimer
= new ContractTestUtils.NanoTimer();
outputStream.close();
@@ -578,4 +580,4 @@ public class ITestAzureBlobFileSystemRandomRead extends DependencyInjectedTest {
assertArrayEquals("Mismatch in read data", bufferV1, bufferV2);
}
-}
\ No newline at end of file
+}
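The System.out.println(String.format(...)) to LOG.info migration can go a step further: SLF4J {} placeholders (as used for "Closing stream {}" above) defer string construction until the message is actually logged. An illustrative comparison — the class name is assumed:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingSketch.class);

      void report(long bytesRead, long elapsedMs) {
        // String.format runs eagerly, even when INFO is disabled:
        LOG.info(String.format("bytesRead=%d, elapsedMs=%d",
            bytesRead, elapsedMs));
        // {} placeholders are only rendered if the message is logged:
        LOG.info("bytesRead={}, elapsedMs={}", bytesRead, elapsedMs);
      }
    }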
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
index a0e648ca191..1a0edaf54e8 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.fs.azurebfs;
-import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
@@ -26,93 +25,74 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
-import org.junit.Assert;
import org.junit.Test;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
/**
* Test rename operation.
*/
-public class ITestAzureBlobFileSystemRename extends DependencyInjectedTest {
+public class ITestAzureBlobFileSystemRename extends
+ AbstractAbfsIntegrationTest {
public ITestAzureBlobFileSystemRename() {
- super();
}
- @Test(expected = FileNotFoundException.class)
+ @Test
public void testEnsureFileIsRenamed() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.create(new Path("testfile"));
- fs.rename(new Path("testfile"), new Path("testfile2"));
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path src = path("testEnsureFileIsRenamed-src");
+ touch(src);
+ Path dest = path("testEnsureFileIsRenamed-dest");
+ fs.delete(dest, true);
+ assertRenameOutcome(fs, src, dest, true);
- FileStatus fileStatus = fs.getFileStatus(new Path("testfile2"));
- assertNotNull(fileStatus);
-
- fs.getFileStatus(new Path("testfile"));
+ assertIsFile(fs, dest);
+ assertPathDoesNotExist(fs, "expected renamed", src);
}
@Test
- public void testRenameFile() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- fs.mkdirs(new Path("/testSrc"));
- fs.create(new Path("/testSrc/file1"));
+ public void testRenameFileUnderDir() throws Exception {
+ final AzureBlobFileSystem fs = getFileSystem();
+ Path sourceDir = new Path("/testSrc");
+ assertMkdirs(fs, sourceDir);
+ String filename = "file1";
+ Path file1 = new Path(sourceDir, filename);
+ touch(file1);
- fs.rename(new Path("/testSrc"), new Path("/testDst"));
- FileStatus[] fileStatus = fs.listStatus(new Path("/testDst"));
- assertNotNull(fileStatus);
+ Path destDir = new Path("/testDst");
+ assertRenameOutcome(fs, sourceDir, destDir, true);
+ FileStatus[] fileStatus = fs.listStatus(destDir);
+ assertNotNull("Null file status", fileStatus);
+ FileStatus status = fileStatus[0];
+ assertEquals("Wrong filename in " + status,
+ filename, status.getPath().getName());
}
@Test
- public void testRenameFileUsingUnicode() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- //known issue: ListStatus operation to folders/files whose name contains '?' will fail
- //This is because Auto rest client didn't encode '?' in the uri query parameters
- String[] folders1 = new String[]{"/%2c%26", "/ÖáΠ⇒", "/A +B", "/A~`!@#$%^&*()-_+={};:'>..."};
- [... rest of the removed testRenameFileUsingUnicode body elided ...]
- final List<Future> tasks = new ArrayList<>();
+ final AzureBlobFileSystem fs = getFileSystem();
+ final List<Future<Void>> tasks = new ArrayList<>();
ExecutorService es = Executors.newFixedThreadPool(10);
for (int i = 0; i < 1000; i++) {
@@ -120,7 +100,7 @@ public class ITestAzureBlobFileSystemRename extends DependencyInjectedTest {
Callable<Void> callable = new Callable<Void>() {
@Override
public Void call() throws Exception {
- fs.create(fileName);
+ touch(fileName);
return null;
}
};
@@ -133,20 +113,25 @@ public class ITestAzureBlobFileSystemRename extends DependencyInjectedTest {
}
es.shutdownNow();
- fs.rename(new Path("/test"), new Path("/renamedDir"));
+ Path source = new Path("/test");
+ Path dest = new Path("/renamedDir");
+ assertRenameOutcome(fs, source, dest, true);
- FileStatus[] files = fs.listStatus(new Path("/renamedDir"));
- Assert.assertEquals(files.length, 1000);
- fs.getFileStatus(new Path("/test"));
+ FileStatus[] files = fs.listStatus(dest);
+ assertEquals("Wrong number of files in listing", 1000, files.length);
+ assertPathDoesNotExist(fs, "rename source dir", source);
}
@Test
public void testRenameRoot() throws Exception {
- final AzureBlobFileSystem fs = this.getFileSystem();
- boolean renamed = fs.rename(new Path("/"), new Path("/ddd"));
- assertFalse(renamed);
-
- renamed = fs.rename(new Path(fs.getUri().toString() + "/"), new Path(fs.getUri().toString() + "/s"));
- assertFalse(renamed);
+ final AzureBlobFileSystem fs = getFileSystem();
+ assertRenameOutcome(fs,
+ new Path("/"),
+ new Path("/testRenameRoot"),
+ false);
+ assertRenameOutcome(fs,
+ new Path(fs.getUri().toString() + "/"),
+ new Path(fs.getUri().toString() + "/s"),
+ false);
}
}
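assertRenameOutcome asserts on rename()'s boolean result and names both paths on failure. Approximately — a sketch, not the exact ContractTestUtils source:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import static org.junit.Assert.fail;

    // Approximate behaviour of ContractTestUtils.assertRenameOutcome.
    public class AssertRenameOutcomeSketch {
      public static void assertRenameOutcome(FileSystem fs, Path src,
          Path dst, boolean expected) throws IOException {
        boolean result = fs.rename(src, dst);
        if (result != expected) {
          fail("rename(" + src + ", " + dst + ") returned " + result
              + ", expected " + expected);
        }
      }
    }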
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java
new file mode 100644
index 00000000000..0ac7fcf08f0
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.util.Arrays;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome;
+
+/**
+ * Parameterized test of rename operations of unicode paths.
+ */
+@RunWith(Parameterized.class)
+public class ITestAzureBlobFileSystemRenameUnicode extends
+ AbstractAbfsIntegrationTest {
+
+ @Parameterized.Parameter
+ public String srcDir;
+
+ @Parameterized.Parameter(1)
+ public String destDir;
+
+ @Parameterized.Parameter(2)
+ public String filename;
+
+ @Parameterized.Parameters
+ public static Iterable<Object[]> params() {