HADOOP-14553. Add (parallelized) integration tests to hadoop-azure

Contributed by Steve Loughran

(cherry picked from commit 2d2d97fa7d)
Steve Loughran 2017-09-15 17:04:43 +01:00
parent b5e9982355
commit 9f6b08f840
71 changed files with 3598 additions and 1272 deletions


@ -61,7 +61,16 @@ public abstract class FileSystemContractBaseTest {
protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
@Rule
public Timeout globalTimeout = new Timeout(30000);
public Timeout globalTimeout = new Timeout(getGlobalTimeout());
/**
* Get the timeout in milliseconds for each test case.
* @return a time in milliseconds.
*/
protected int getGlobalTimeout() {
return 30 * 1000;
}
@Rule
public ExpectedException thrown = ExpectedException.none();


@ -122,7 +122,7 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
Path path = path("testopenfiletwice.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
createFile(getFileSystem(), path, false, block);
createFile(getFileSystem(), path, true, block);
//open first
FSDataInputStream instream1 = getFileSystem().open(path);
FSDataInputStream instream2 = null;
@ -150,7 +150,7 @@ public abstract class AbstractContractOpenTest extends AbstractFSContractTestBas
int base = 0x40; // 64
byte[] block = dataset(len, base, base + len);
//this file now has a simple rule: offset => (value | 0x40)
createFile(getFileSystem(), path, false, block);
createFile(getFileSystem(), path, true, block);
//open first
instream = getFileSystem().open(path);
assertEquals(base, instream.read());


@ -341,7 +341,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
int filesize = 10 * 1024;
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, false, buf);
createFile(getFileSystem(), randomSeekFile, true, buf);
Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.


@ -34,6 +34,15 @@
<properties>
<file.encoding>UTF-8</file.encoding>
<downloadSources>true</downloadSources>
<hadoop.tmp.dir>${project.build.directory}/test</hadoop.tmp.dir>
<!-- are scale tests enabled ? -->
<fs.azure.scale.test.enabled>unset</fs.azure.scale.test.enabled>
<!-- Size in MB of huge files. -->
<fs.azure.scale.test.huge.filesize>unset</fs.azure.scale.test.huge.filesize>
<!-- Size in MB of partitions in huge file uploads. -->
<fs.azure.scale.test.huge.partitionsize>unset</fs.azure.scale.test.huge.partitionsize>
<!-- Timeout in seconds for scale tests.-->
<fs.azure.scale.test.timeout>7200</fs.azure.scale.test.timeout>
</properties>
<build>
@ -224,4 +233,246 @@
</dependency>
</dependencies>
<profiles>
<profile>
<id>parallel-tests</id>
<activation>
<property>
<name>parallel-tests</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>create-parallel-tests-dirs</id>
<phase>test-compile</phase>
<configuration>
<target>
<script language="javascript"><![CDATA[
var baseDirs = [
project.getProperty("test.build.data"),
project.getProperty("test.build.dir"),
project.getProperty("hadoop.tmp.dir")
];
for (var i in baseDirs) {
for (var j = 1; j <= ${testsThreadCount}; ++j) {
var mkdir = project.createTask("mkdir");
mkdir.setDir(new java.io.File(baseDirs[i], j));
mkdir.perform();
}
}
]]></script>
</target>
</configuration>
<goals>
<goal>run</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<executions>
<execution>
<id>default-test</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<forkCount>1</forkCount>
<forkCount>${testsThreadCount}</forkCount>
<reuseForks>false</reuseForks>
<argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
<forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
<systemPropertyVariables>
<test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
<test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
<hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
<test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
<fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
<fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
<fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
<fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
</systemPropertyVariables>
<includes>
<include>**/Test*.java</include>
</includes>
<excludes>
<exclude>**/TestRollingWindowAverage*.java</exclude>
</excludes>
</configuration>
</execution>
<execution>
<id>serialized-test</id>
<goals>
<goal>test</goal>
</goals>
<configuration>
<forkCount>1</forkCount>
<reuseForks>false</reuseForks>
<argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
<forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
<systemPropertyVariables>
<test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
<test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
<hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
<test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
<fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
<fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
<fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
<fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
</systemPropertyVariables>
<includes>
<include>**/TestRollingWindowAverage*.java</include>
</includes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<id>default-integration-test</id>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
<configuration>
<forkCount>${testsThreadCount}</forkCount>
<reuseForks>false</reuseForks>
<argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
<forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
<systemPropertyVariables>
<!-- Tell tests that they are being executed in parallel -->
<test.parallel.execution>true</test.parallel.execution>
<test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
<test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
<hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
<!-- Due to a Maven quirk, setting this to just -->
<!-- surefire.forkNumber won't do the parameter -->
<!-- substitution. Putting a prefix in front of it like -->
<!-- "fork-" makes it work. -->
<test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
<!-- Propagate scale parameters -->
<fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
<fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
<fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
<fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
</systemPropertyVariables>
<!-- Some tests cannot run in parallel. Tests that cover -->
<!-- access to the root directory must run in isolation -->
<!-- from anything else that could modify the bucket. -->
<!-- azure tests that cover multi-part upload must run in -->
<!-- isolation, because the file system is configured to -->
<!-- purge existing multi-part upload data on -->
<!-- initialization. MiniYARNCluster has not yet been -->
<!-- changed to handle parallel test execution gracefully. -->
<!-- Exclude all of these tests from parallel execution, -->
<!-- and instead run them sequentially in a separate -->
<!-- Surefire execution step later. -->
<includes>
<include>**/ITest*.java</include>
</includes>
<excludes>
<exclude>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</exclude>
<exclude>**/ITestFileSystemOperationsWithThreads.java</exclude>
<exclude>**/ITestOutOfBandAzureBlobOperationsLive.java</exclude>
<exclude>**/ITestNativeAzureFileSystemAuthorizationWithOwner.java</exclude>
<exclude>**/ITestNativeAzureFileSystemConcurrencyLive.java</exclude>
<exclude>**/ITestNativeAzureFileSystemLive.java</exclude>
<exclude>**/ITestNativeAzureFSPageBlobLive.java</exclude>
<exclude>**/ITestWasbRemoteCallHelper.java</exclude>
<exclude>**/ITestBlockBlobInputStream.java</exclude>
</excludes>
</configuration>
</execution>
<!-- Do a sequential run for tests that cannot handle -->
<!-- parallel execution. -->
<execution>
<id>sequential-integration-tests</id>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
<configuration>
<forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
<systemPropertyVariables>
<test.parallel.execution>false</test.parallel.execution>
<fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
<fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
<fs.azure.scale.test.huge.partitionsize>${fs.azure.scale.test.huge.partitionsize}</fs.azure.scale.test.huge.partitionsize>
<fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
</systemPropertyVariables>
<includes>
<include>**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java</include>
<include>**/ITestFileSystemOperationsWithThreads.java</include>
<include>**/ITestOutOfBandAzureBlobOperationsLive.java</include>
<include>**/ITestNativeAzureFileSystemAuthorizationWithOwner.java</include>
<include>**/ITestNativeAzureFileSystemConcurrencyLive.java</include>
<include>**/ITestNativeAzureFileSystemLive.java</include>
<include>**/ITestWasbRemoteCallHelper.java</include>
<include>**/ITestBlockBlobInputStream.java</include>
</includes>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>sequential-tests</id>
<activation>
<property>
<name>!parallel-tests</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
<configuration>
<systemPropertyVariables>
<!-- Propagate scale parameters -->
<fs.azure.scale.test.enabled>${fs.azure.scale.test.enabled}</fs.azure.scale.test.enabled>
<fs.azure.scale.test.huge.filesize>${fs.azure.scale.test.huge.filesize}</fs.azure.scale.test.huge.filesize>
<fs.azure.scale.test.timeout>${fs.azure.scale.test.timeout}</fs.azure.scale.test.timeout>
</systemPropertyVariables>
<forkedProcessTimeoutInSeconds>${fs.azure.scale.test.timeout}</forkedProcessTimeoutInSeconds>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<!-- Turn on scale tests-->
<profile>
<id>scale</id>
<activation>
<property>
<name>scale</name>
</property>
</activation>
<properties>
<fs.azure.scale.test.enabled>true</fs.azure.scale.test.enabled>
</properties>
</profile>
</profiles>
</project>


@ -346,7 +346,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
private String delegationToken;
/** The error message template when container is not accessible. */
static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
public static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
+ "account %s in the configuration, and its container %s is not "
+ "accessible using anonymous credentials. Please check if the container "
+ "exists first. If it is not publicly available, you have to provide "


@ -519,96 +519,8 @@ The maximum number of entries that that cache can hold can be customized using t
<value>true</value>
</property>
```
## Testing the hadoop-azure Module
The hadoop-azure module includes a full suite of unit tests. Most of the tests
will run without additional configuration by running `mvn test`. This includes
tests against mocked storage, which is an in-memory emulation of Azure Storage.
A selection of tests can run against the
[Azure Storage Emulator](http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx)
which is a high-fidelity emulation of live Azure Storage. The emulator is
sufficient for high-confidence testing. The emulator is a Windows executable
that runs on a local machine.
To use the emulator, install Azure SDK 2.3 and start the storage emulator. Then,
edit `src/test/resources/azure-test.xml` and add the following property:
```xml
<property>
<name>fs.azure.test.emulator</name>
<value>true</value>
</property>
```
There is a known issue when running tests with the emulator. You may see the
following failure message:
com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
To resolve this, restart the Azure Emulator. Ensure it is v3.2 or later.
It's also possible to run tests against a live Azure Storage account by saving a
file to `src/test/resources/azure-auth-keys.xml` and setting
the name of the storage account and its access key.
For example:
```xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.azure.test.account.name</name>
<value>{ACCOUNTNAME}.blob.core.windows.net</value>
</property>
<property>
<name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
<value>{ACCOUNT ACCESS KEY}</value>
</property>
</configuration>
```
To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
and the account access key. For example:
```xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.contract.test.fs.wasb</name>
<value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
<description>The name of the azure file system for testing.</description>
</property>
<property>
<name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
<value>{ACCOUNT ACCESS KEY}</value>
</property>
</configuration>
```
Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` looks like the following:
```xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.azure.test.account.name</name>
<value>{ACCOUNTNAME}.blob.core.windows.net</value>
</property>
<property>
<name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
<value>{ACCOUNT ACCESS KEY}</value>
</property>
<property>
<name>fs.contract.test.fs.wasb</name>
<value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
</property>
</configuration>
```
DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL. The keys to your Azure
Storage account are a secret and must not be shared.
## Further Reading
* [Testing the Azure WASB client](testing_azure.html).
* MSDN article, [Understanding Block Blobs, Append Blobs, and Page Blobs](https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs)


@ -0,0 +1,576 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
# Testing the Azure WASB client
<!-- MACRO{toc|fromDepth=0|toDepth=5} -->
This module includes both unit tests, which can run in isolation without
connecting to the Azure Storage service, and integration tests, which require a working
connection to interact with a container. Unit test suites follow the naming
convention `Test*.java`. Integration tests follow the naming convention
`ITest*.java`.
## Policy for submitting patches which affect the `hadoop-azure` module.
The Apache Jenkins infrastructure does not run any cloud integration tests,
due to the need to keep credentials secure.
### The submitter of any patch is required to run all the integration tests and declare which Azure region they used.
This is important: **patches which do not include this declaration will be ignored**
This policy has proven to be the only mechanism to guarantee full regression
testing of code changes. Why the declaration of region? Two reasons:
1. It helps us identify regressions which only surface against specific endpoints.
1. It forces the submitters to be more honest about their testing. It's easy
to lie, "yes, I tested this". To say "yes, I tested this against Azure US-west"
is a more specific lie and harder to make. And, if you get caught out: you
lose all credibility with the project.
You don't need to test from a VM within the Azure infrastructure; all you need
are credentials.
It's neither hard nor expensive to run the tests; if you can't,
there's no guarantee your patch works. The reviewers have enough to do, and
don't have the time to do these tests, especially as every failure will simply
make for slow iterative development.
Please: run the tests. And if you don't, we are sorry for declining your
patch, but we have to.
### What if there's an intermittent failure of a test?
Some of the tests do fail intermittently, especially in parallel runs.
If this happens, try to run the test on its own to see if the test succeeds.
If it still fails, include this fact in your declaration. We know some tests
are intermittently unreliable.
### What if the tests are timing out or failing over my network connection?
The tests are designed to be configurable for different
timeouts. If you are seeing problems and this configuration isn't working,
that's a sign that the configuration mechanism isn't complete. If it's happening
in the production code, that could be a sign of a problem which may surface
over long-haul connections. Please help us identify and fix these problems
&mdash; especially as you are the one best placed to verify the fixes work.
## Setting up the tests
### Testing the `hadoop-azure` Module
The `hadoop-azure` module includes a full suite of unit tests. Many of the tests
will run without additional configuration by running `mvn test`. This includes
tests against mocked storage, which is an in-memory emulation of Azure Storage.
The integration tests are designed to test directly against an Azure storage
service, and require an account and credentials in order to run.
This is done by creating the file `src/test/resources/azure-auth-keys.xml`
and setting the name of the storage account and its access key there.
For example:
```xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.azure.test.account.name</name>
<value>{ACCOUNTNAME}.blob.core.windows.net</value>
</property>
<property>
<name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
<value>{ACCOUNT ACCESS KEY}</value>
</property>
</configuration>
```
To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
and the account access key. For example:
```xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.contract.test.fs.wasb</name>
<value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
<description>The name of the azure file system for testing.</description>
</property>
<property>
<name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
<value>{ACCOUNT ACCESS KEY}</value>
</property>
</configuration>
```
Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` looks like the following:
```xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.azure.test.account.name</name>
<value>{ACCOUNTNAME}.blob.core.windows.net</value>
</property>
<property>
<name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
<value>{ACCOUNT ACCESS KEY}</value>
</property>
<property>
<name>fs.contract.test.fs.wasb</name>
<value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
</property>
</configuration>
```
DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL. The keys to your Azure
Storage account are a secret and must not be shared.
## Running the Tests
After completing the configuration, execute the test run through Maven.
```bash
mvn -T 1C clean verify
```
It's also possible to execute multiple test suites in parallel by passing the
`parallel-tests` property on the command line. The tests spend most of their
time blocked on network I/O, so running in parallel tends to
complete full test runs faster.
```bash
mvn -T 1C -Dparallel-tests clean verify
```
Some tests must run with exclusive access to the storage container, so even with the
`parallel-tests` property, several test suites will run in serial in a separate
Maven execution step after the parallel tests.
By default, `parallel-tests` runs 4 test suites concurrently. This can be tuned
by passing the `testsThreadCount` property.
```bash
mvn -T 1C -Dparallel-tests -DtestsThreadCount=8 clean verify
```
To run just unit tests, which do not require Azure connectivity or credentials,
use any of the above invocations, but switch the goal to `test` instead of
`verify`.
```bash
mvn -T 1C clean test
mvn -T 1C -Dparallel-tests clean test
mvn -T 1C -Dparallel-tests -DtestsThreadCount=8 clean test
```
To run only a specific named subset of tests, pass the `test` property for unit
tests or the `it.test` property for integration tests.
```bash
mvn -T 1C clean test -Dtest=TestRollingWindowAverage
mvn -T 1C clean verify -Dscale -Dit.test=ITestFileSystemOperationExceptionMessage -Dtest=none
mvn -T 1C clean verify -Dtest=none -Dit.test=ITest*
```
Notes:
1. When running a specific subset of tests, the patterns passed in `test`
and `it.test` override the configuration of which tests need to run in isolation
in a separate serial phase (mentioned above). This can cause unpredictable
results, so the recommendation is to avoid passing `parallel-tests` in
combination with `test` or `it.test`. If you know that you are specifying only
tests that can run safely in parallel, then it will work. For wide patterns,
like `ITest*` shown above, it may cause unpredictable test failures.
2. The command line shell may try to expand the "*" and sometimes the "#" symbols
in test patterns. In such situations, escape the character with a "\\" prefix.
Example:
mvn -T 1C clean verify -Dtest=none -Dit.test=ITest\*
## Viewing the results
Integration test results and logs are stored in `target/failsafe-reports/`.
An HTML report can be generated during site generation, or with the `surefire-report`
plugin:
```bash
# for the unit tests
mvn -T 1C surefire-report:report-only
# for the integration tests
mvn -T 1C surefire-report:failsafe-report-only
# all reports for this module
mvn -T 1C site:site
```
## Scale Tests
There are a set of tests designed to measure the scalability and performance
at scale of the filesystem client, *Scale Tests*. Tests include: creating
and traversing directory trees, uploading large files, renaming them,
deleting them, seeking through the files, performing random IO, and others.
This makes them a foundational part of the benchmarking.
By their very nature they are slow. And, as their execution time is often
limited by bandwidth between the computer running the tests and the Azure endpoint,
parallel execution does not speed these tests up.
### Enabling the Scale Tests
The tests are enabled if the `scale` property is set in the maven build;
this can be done regardless of whether or not the parallel test profile
is used.
```bash
mvn -T 1C verify -Dscale
mvn -T 1C verify -Dparallel-tests -Dscale -DtestsThreadCount=8
```
The most bandwidth-intensive tests (those which upload data) always run
sequentially; those which are slow due to HTTPS setup costs or server-side
actions are included in the set of parallelized tests.
### Scale test tuning options
Some of the tests can be tuned from the maven build or from the
configuration file used to run the tests.
```bash
mvn -T 1C verify -Dparallel-tests -Dscale -DtestsThreadCount=8 -Dfs.azure.scale.test.huge.filesize=128M
```
The algorithm is (a Java sketch follows the list):
1. The value is queried from the configuration file, using a default value if
it is not set.
1. The value is queried from the JVM System Properties, where it is passed
down by maven.
1. If the system property is null, an empty string, or it has the value `unset`,
then the configuration value is used. The `unset` option is used to
[work around a quirk in maven property propagation](http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven).
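A minimal Java sketch of that resolution order; the class, helper name and
signature here are illustrative, not the actual test-utility API:
```java
import org.apache.hadoop.conf.Configuration;

public final class ScaleTestProperties {
  private ScaleTestProperties() {
  }

  /** Resolve a scale test option: a maven-set system property wins unless "unset". */
  public static String getTestProperty(Configuration conf, String key,
      String defVal) {
    String confVal = conf.getTrimmed(key, defVal);
    String propVal = System.getProperty(key);
    return (propVal == null || propVal.isEmpty() || "unset".equals(propVal))
        ? confVal   // fall back to the configuration file value (or default)
        : propVal;  // use the value maven passed down
  }
}
```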
Only a few properties can be set this way; more will be added.
| Property | Meaning |
|-----------|-------------|
| `fs.azure.scale.test.huge.filesize`| Size for huge file uploads |
| `fs.azure.scale.test.huge.partitionsize`| Size for partitions in huge file uploads |
The file and partition sizes are numeric values with a k/m/g/t/p suffix depending
on the desired size. For example: 128M, 128m, 2G, 2g, 4T or even 1P.
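Hadoop's `Configuration` already parses these suffixes, so a test extending
`AbstractWasbTestBase` could read such a value directly; the default used
below is illustrative:
```java
// getLongBytes() understands suffixes such as 128m, 2g and 4t
long hugeFileSize = getConfiguration().getLongBytes(
    "fs.azure.scale.test.huge.filesize", 128 * 1024 * 1024);
```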
#### Scale test configuration options
Some scale tests perform multiple operations (such as creating many directories).
The exact number of operations to perform is configurable in the option
`scale.test.operation.count`
```xml
<property>
<name>scale.test.operation.count</name>
<value>10</value>
</property>
```
Larger values generate more load, and are recommended when testing locally,
or in batch runs.
Smaller values result in faster test runs, especially when the object
store is a long way away.
Operations which work on directories have a separate option: this controls
the width and depth of tests creating recursive directories. Larger
values create exponentially more directories, with consequent performance
impact.
```xml
<property>
<name>scale.test.directory.count</name>
<value>2</value>
</property>
```
DistCp tests targeting Azure support a configurable file size. The default is
10 MB, but the configuration value is expressed in KB so that it can be tuned
smaller to achieve faster test runs.
```xml
<property>
<name>scale.test.distcp.file.size.kb</name>
<value>10240</value>
</property>
```
Azure-specific scale test properties are:
##### `fs.azure.scale.test.huge.filesize`: size in MB for "Huge file tests".
The Huge File tests validate Azure Storage's ability to handle large files; the property
`fs.azure.scale.test.huge.filesize` declares the file size to use.
```xml
<property>
<name>fs.azure.scale.test.huge.filesize</name>
<value>200M</value>
</property>
```
Tests at this scale are slow: they are best executed from hosts running in
the cloud infrastructure where the storage endpoint is based.
## Using the emulator
A selection of tests can run against the
[Azure Storage Emulator](http://msdn.microsoft.com/en-us/library/azure/hh403989.aspx)
which is a high-fidelity emulation of live Azure Storage. The emulator is
sufficient for high-confidence testing. The emulator is a Windows executable
that runs on a local machine.
To use the emulator, install Azure SDK 2.3 and start the storage emulator. Then,
edit `src/test/resources/azure-test.xml` and add the following property:
```xml
<property>
<name>fs.azure.test.emulator</name>
<value>true</value>
</property>
```
There is a known issue when running tests with the emulator. You may see the
following failure message:
com.microsoft.windowsazure.storage.StorageException: The value for one of the HTTP headers is not in the correct format.
To resolve this, restart the Azure Emulator. Ensure it is v3.2 or later.
## Debugging Test failures
Logging at debug level is the standard way to provide more diagnostics output;
after setting this, rerun the tests:
```properties
log4j.logger.org.apache.hadoop.fs.azure=DEBUG
```
## Adding new tests
New tests are always welcome. Bear in mind that we need to keep costs
and test time down, which is done by
* Not duplicating tests.
* Being efficient in your use of Hadoop API calls.
* Isolating large/slow tests into the "scale" test group.
* Designing all tests to execute in parallel (where possible).
* Adding new probes and predicates into existing tests, albeit carefully.
*No duplication*: if an operation is tested elsewhere, don't repeat it. This
applies as much for metadata operations as it does for bulk IO. If a new
test case is added which completely obsoletes an existing test, it is OK
to cut the previous one, after showing that coverage is not worsened.
*Efficient*: prefer a single call to `getFileStatus()` and examining the results,
rather than calls to `exists()`, `isFile()`, etc.
*Fail with useful information:* provide as much diagnostics as possible
on a failure. Using `org.apache.hadoop.fs.contract.ContractTestUtils` to make
assertions about the state of a filesystem helps here.
*Isolating Scale tests*. Any test doing large amounts of IO MUST extend the
class `AbstractAzureScaleTest`, so that it only runs if `scale` is defined on a
build and supports test timeouts configurable by the user. Scale tests should also
support configurability as to the actual size of objects/number of operations,
so that behavior at different scale can be verified.
*Designed for parallel execution*. A key need here is for each test suite to work
on isolated parts of the filesystem. Subclasses of `AbstractWasbTestBase`
SHOULD use the `path()`, `methodPath()` and `blobPath()` methods,
to build isolated paths. Tests MUST NOT assume that they have exclusive access
to a container.
*Extending existing tests where appropriate*. This recommendation goes
against the normal testing best practice of "test one thing per method".
Because it is so slow to create directory trees or upload large files, we do
not have that luxury. All the tests against real endpoints are integration
tests where sharing test setup and teardown saves time and money.
A standard way to do this is to extend existing tests with some extra predicates,
rather than write new tests. When doing this, make sure that the new predicates
fail with meaningful diagnostics, so any new problems can be easily debugged
from test logs.
### Requirements of new Tests
This is what we expect from new tests; they're an extension of the normal
Hadoop requirements, based on the need to work with remote servers whose
use requires the presence of secret credentials, where tests may be slow,
and where finding out why something failed from nothing but the test output
is critical.
#### Subclasses Existing Shared Base Classes
There are a set of base classes which should be extended for Azure tests and
integration tests.
##### `org.apache.hadoop.fs.azure.AbstractWasbTestWithTimeout`
This extends the junit `Assert` class with thread names and timeouts,
the default timeout being set in `AzureTestConstants.AZURE_TEST_TIMEOUT` to
ten minutes. The thread names are set to aid analyzing the stack trace of
a test: a `jstack` call can be used to determine which test case each thread is running.
##### `org.apache.hadoop.fs.azure.AbstractWasbTestBase`
The base class for tests which use `AzureBlobStorageTestAccount` to create
mock or live Azure clients; in test teardown it tries to clean up store state.
1. This class requires subclasses to implement `createTestAccount()` to create
a mock or real test account.
1. The configuration used to create a test account *should* be that from
`createConfiguration()`; this can be extended in subclasses to tune the settings,
as the sketch below shows.
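A minimal sketch of such an override; the property tuned here,
`fs.azure.secure.mode`, appears elsewhere in this commit, and the specific
tuning is only illustrative:
```java
@Override
public Configuration createConfiguration() {
  Configuration conf = super.createConfiguration();
  // suite-specific tuning goes here; this example enables secure mode
  conf.setBoolean("fs.azure.secure.mode", true);
  return conf;
}
```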
##### `org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest`
This extends `AbstractWasbTestBase` for scale tests: those tests
only run when `-Dscale` is used to select the "scale" profile.
These tests have a timeout of 30 minutes, so as to support slow test runs.
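As a sketch, a hypothetical scale test might look like the following; the class
name and test body are illustrative, and it assumes the no-argument
`AzureBlobStorageTestAccount.create()` factory:
```java
import org.junit.Test;

import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;

public class ITestExampleScale extends AbstractAzureScaleTest {

  @Override
  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
    return AzureBlobStorageTestAccount.create();
  }

  @Test
  public void testBulkOperations() throws Exception {
    describe("bulk operations against a fork-unique path");
    getFileSystem().mkdirs(methodPath());
  }
}
```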
Having shared base classes helps reduce future maintenance. Please
use them.
#### Secure
Don't ever log credentials. The credential tests go out of their way to
not provide meaningful logs or assertion messages precisely to avoid this.
#### Efficient in Time and Money
This means efficient in test setup/teardown, and, ideally, making use of
existing public datasets to save setup time and tester cost.
The reference example is `ITestAzureHugeFiles`. This marks the test suite as
`@FixMethodOrder(MethodSorters.NAME_ASCENDING)` then orders the test cases such
that each test case expects the previous test to have completed (here: uploaded a file,
renamed a file, ...). This provides for independent tests in the reports, yet still
permits an ordered sequence of operations. Do note the use of JUnit's `Assume`
to detect when the preconditions for a single test case are not met; hence
the tests become skipped, rather than failing with a trace which is really a false alarm.
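A compressed sketch of that pattern; apart from `@FixMethodOrder` and `Assume`,
which are JUnit APIs, the names here are hypothetical:
```java
import org.junit.Assume;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;

import org.apache.hadoop.fs.Path;

@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ITestExampleOrderedSuite extends AbstractWasbTestBase {

  /** Path created by the first test case; null if that case did not run. */
  private static Path created;

  @Override
  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
    return AzureBlobStorageTestAccount.create();
  }

  @Test
  public void test_010_create() throws Exception {
    created = methodPath();
    getFileSystem().mkdirs(created);
  }

  @Test
  public void test_020_rename() throws Exception {
    // skip, not fail, if the earlier case never completed
    Assume.assumeNotNull(created);
    getFileSystem().rename(created, blobPath("renamed"));
  }
}
```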
### Works Over Long-haul Links
As well as making file size and operation counts scalable, this includes
making test timeouts adequate. The Scale tests make this configurable; it's
hard-coded to ten minutes in `AbstractAzureIntegrationTest()`; subclasses can
change this by overriding `getTestTimeoutMillis()`.
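For example, a subclass could opt in to a longer timeout; the value below is
illustrative:
```java
@Override
protected int getTestTimeoutMillis() {
  // long-haul links may need more than the default
  return 30 * 60 * 1000;
}
```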
Equally importantly: support proxies, as some testers need them.
### Provides Diagnostics and Timing Information
1. Create logs, log things.
1. You can use `AbstractWasbTestBase.describe(format-string, args)` here; it
adds some newlines so as to be easier to spot.
1. Use `ContractTestUtils.NanoTimer` to measure the duration of operations,
and log the output; a sketch of both follows this list.
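A sketch of both calls together, as a hypothetical helper in a subclass of
`AbstractWasbTestBase`:
```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;

// a helper inside a subclass of AbstractWasbTestBase
protected void renameWithTiming(Path src, Path dest) throws IOException {
  describe("renaming %s to %s", src, dest);
  ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
  getFileSystem().rename(src, dest);
  timer.end("time to rename %s", src);  // logs the measured duration
}
```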
#### Fails Meaningfully
The `ContractTestUtils` class contains a whole set of assertions for making
statements about the expected state of a filesystem, e.g.
`assertPathExists(FS, path)`, `assertPathDoesNotExist(FS, path)`, and others.
These do their best to provide meaningful diagnostics on failures (e.g. directory
listings, file status, ...), and so help make failures easier to understand.
At the very least, *do not use `assertTrue()` or `assertFalse()` without
including error messages*.
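For instance, a hypothetical case in an `AbstractWasbTestBase` subclass:
```java
import org.junit.Test;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;

@Test
public void testCreatedFileExists() throws Exception {
  Path path = methodPath();
  ContractTestUtils.touch(getFileSystem(), path);
  // on failure this reports filesystem state, not just "expected true"
  ContractTestUtils.assertPathExists(getFileSystem(),
      "file created by touch()", path);
}
```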
### Cleans Up Afterwards
Keeps costs down.
1. Do not clean up only when a test case completes successfully; test suite
teardown must do it (see the sketch after this list).
1. That teardown code must check for the filesystem and other fields being
null before the cleanup. Why? If test setup fails, the teardown methods still
get called.
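This commit's own `AbstractWasbTestBase.tearDown()`, shown earlier in the diff,
is the pattern to copy:
```java
@After
public void tearDown() throws Exception {
  describe("closing test account and filesystem");
  // both calls below are null-safe, so a failed setup cannot NPE here
  testAccount = cleanupTestAccount(testAccount);
  IOUtils.closeStream(fs);
  fs = null;
}
```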
### Works Reliably
We really appreciate this &mdash; you will too.
## Tips
### How to keep your credentials really safe
Although the `azure-auth-keys.xml` file is marked as ignored in git and subversion,
it is still in your source tree, and there's always that risk that it may
creep out.
You can avoid this by keeping your keys outside the source tree and
using an absolute XInclude reference to it.
```xml
<configuration>
<include xmlns="http://www.w3.org/2001/XInclude"
href="file:///users/qe/.auth-keys.xml" />
</configuration>
```
### Cleaning up Containers
The Azure tests create containers with the prefix `"wasbtests-"` and delete
them after the test runs. If a test run is interrupted, these containers
may not get deleted. There is a special test case which can be manually invoked
to list and delete these: `CleanupTestContainers`.
```bash
mvn test -Dtest=CleanupTestContainers
```
This will delete the containers; the output log of the test run will
provide the details and summary of the operation.


@ -18,15 +18,21 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assume.assumeNotNull;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.io.IOUtils;
import static org.junit.Assume.assumeNotNull;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
/**
* Abstract test class that provides basic setup and teardown of testing Azure
* Storage account. Each subclass defines a different set of test cases to run
@ -34,41 +40,137 @@ import org.slf4j.LoggerFactory;
* to run those tests. The returned account might integrate with Azure Storage
* directly or it might be a mock implementation.
*/
public abstract class AbstractWasbTestBase {
public abstract class AbstractWasbTestBase extends AbstractWasbTestWithTimeout
implements AzureTestConstants {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractWasbTestBase.class);
@VisibleForTesting
protected NativeAzureFileSystem fs;
private AzureBlobStorageTestAccount testAccount;
protected AzureBlobStorageTestAccount testAccount;
@Before
public void setUp() throws Exception {
testAccount = createTestAccount();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
AzureBlobStorageTestAccount account = createTestAccount();
assumeNotNull(account);
bindToTestAccount(account);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
describe("closing test account and filesystem");
testAccount = cleanupTestAccount(testAccount);
IOUtils.closeStream(fs);
fs = null;
}
public Configuration getConfiguration() {
return new Configuration();
/**
* Create the configuration to use when creating a test account.
* Subclasses can override this to tune the test account configuration.
* @return a configuration.
*/
public Configuration createConfiguration() {
return AzureBlobStorageTestAccount.createTestConfiguration();
}
/**
* Create the test account.
* Subclasses must implement this.
* @return the test account.
* @throws Exception failure to create the account.
*/
protected abstract AzureBlobStorageTestAccount createTestAccount()
throws Exception;
/**
* Get the test account.
* @return the current test account.
*/
protected AzureBlobStorageTestAccount getTestAccount() {
return testAccount;
}
/**
* Get the filesystem
* @return the current filesystem.
*/
protected NativeAzureFileSystem getFileSystem() {
return fs;
}
/**
* Get the configuration used to create the filesystem
* @return the configuration of the test FS
*/
protected Configuration getConfiguration() {
return getFileSystem().getConf();
}
/**
* Bind to a new test account; closing any existing one.
* This updates the test account returned in {@link #getTestAccount()}
* and the filesystem in {@link #getFileSystem()}.
* @param account new test account
*/
protected void bindToTestAccount(AzureBlobStorageTestAccount account) {
// clean any existing test account
cleanupTestAccount(testAccount);
IOUtils.closeStream(fs);
testAccount = account;
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
}
/**
* Return a path to a blob which will be unique for this fork.
* @param filepath filepath
* @return a path under the default blob directory
* @throws IOException IO problems.
*/
protected Path blobPath(String filepath) throws IOException {
return blobPathForTests(getFileSystem(), filepath);
}
/**
* Create a path under the test path provided by
* the FS contract.
* @param filepath path string in
* @return a path qualified by the test filesystem
* @throws IOException IO problems
*/
protected Path path(String filepath) throws IOException {
return pathForTests(getFileSystem(), filepath);
}
/**
* Return a path bonded to this method name, unique to this fork during
* parallel execution.
* @return a method name unique to (fork, method).
* @throws IOException IO problems
*/
protected Path methodPath() throws IOException {
return path(methodName.getMethodName());
}
/**
* Return a blob path bonded to this method name, unique to this fork during
* parallel execution.
* @return a method name unique to (fork, method).
* @throws IOException IO problems
*/
protected Path methodBlobPath() throws IOException {
return blobPath(methodName.getMethodName());
}
/**
* Describe a test in the logs.
* @param text text to print
* @param args arguments to format in the printing
*/
protected void describe(String text, Object... args) {
LOG.info("\n\n{}: {}\n",
methodName.getMethodName(),
String.format(text, args));
}
}


@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
/**
* Base class for any Wasb test with timeouts & named threads.
* This class does not attempt to bind to Azure.
*/
public class AbstractWasbTestWithTimeout extends Assert {
/**
* The name of the current method.
*/
@Rule
public TestName methodName = new TestName();
/**
* Set the timeout for every test.
* This is driven by the value returned by {@link #getTestTimeoutMillis()}.
*/
@Rule
public Timeout testTimeout = new Timeout(getTestTimeoutMillis());
/**
* Name the junit thread for the class. This will be overridden
* before the individual test methods are run.
*/
@BeforeClass
public static void nameTestThread() {
Thread.currentThread().setName("JUnit");
}
/**
* Name the thread to the current test method.
*/
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
/**
* Override point: the test timeout in milliseconds.
* @return a timeout in milliseconds
*/
protected int getTestTimeoutMillis() {
return AzureTestConstants.AZURE_TEST_TIMEOUT;
}
}


@ -21,12 +21,15 @@ package org.apache.hadoop.fs.azure;
import com.microsoft.azure.storage.*;
import com.microsoft.azure.storage.blob.*;
import com.microsoft.azure.storage.core.Base64;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemMetricsSystem;
import org.apache.hadoop.metrics2.AbstractMetric;
@ -35,6 +38,8 @@ import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.*;
@ -46,10 +51,10 @@ import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECU
/**
* Helper class to create WASB file systems backed by either a mock in-memory
* implementation or a real Azure Storage account. See RunningLiveWasbTests.txt
* for instructions on how to connect to a real Azure Storage account.
* implementation or a real Azure Storage account.
*/
public final class AzureBlobStorageTestAccount {
public final class AzureBlobStorageTestAccount implements AutoCloseable,
AzureTestConstants {
private static final Logger LOG = LoggerFactory.getLogger(
AzureBlobStorageTestAccount.class);
@ -166,6 +171,7 @@ public final class AzureBlobStorageTestAccount {
return new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
}
@Deprecated
public static Path pageBlobPath(String fileName) {
return new Path(pageBlobPath(), fileName);
}
@ -201,6 +207,9 @@ public final class AzureBlobStorageTestAccount {
* @return
*/
private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
Assert.assertNotNull("null filesystem", fs);
Assert.assertNotNull("null filesystemn instance ID",
fs.getInstrumentation().getFileSystemInstanceId());
String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
for (MetricsTag currentTag : currentRecord.tags()) {
if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
@ -247,13 +256,16 @@ public final class AzureBlobStorageTestAccount {
getBlobReference(blobKey).releaseLease(accessCondition);
}
private static void saveMetricsConfigFile() {
private static void saveMetricsConfigFile() throws IOException {
if (!metricsConfigSaved) {
String testFilename = TestMetricsConfig.getTestFilename(
"hadoop-metrics2-azure-file-system");
File dest = new File(testFilename).getCanonicalFile();
dest.getParentFile().mkdirs();
new org.apache.hadoop.metrics2.impl.ConfigBuilder()
.add("azure-file-system.sink.azuretestcollector.class",
StandardCollector.class.getName())
.save(TestMetricsConfig.getTestFilename(
"hadoop-metrics2-azure-file-system.properties"));
.save(testFilename);
metricsConfigSaved = true;
}
}
@ -314,9 +326,8 @@ public final class AzureBlobStorageTestAccount {
Configuration conf = createTestConfiguration();
if (!conf.getBoolean(USE_EMULATOR_PROPERTY_NAME, false)) {
// Not configured to test against the storage emulator.
LOG.warn("Skipping emulator Azure test because configuration doesn't "
+ "indicate that it's running. Please see RunningLiveWasbTests.txt "
+ "for guidance.");
LOG.warn("Skipping emulator Azure test because configuration "
+ "doesn't indicate that it's running.");
return null;
}
CloudStorageAccount account =
@ -482,8 +493,7 @@ public final class AzureBlobStorageTestAccount {
credentials = StorageCredentialsAnonymous.ANONYMOUS;
} else {
LOG.warn("Skipping live Azure test because of missing key for"
+ " account '" + accountName + "'. "
+ "Please see RunningLiveWasbTests.txt for guidance.");
+ " account '" + accountName + "'.");
return null;
}
} else {
@ -517,8 +527,7 @@ public final class AzureBlobStorageTestAccount {
throws URISyntaxException, KeyProviderException {
String testAccountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
if (testAccountName == null) {
LOG.warn("Skipping live Azure test because of missing test account. "
+ "Please see RunningLiveWasbTests.txt for guidance.");
LOG.warn("Skipping live Azure test because of missing test account");
return null;
}
return createStorageAccount(testAccountName, conf, false);
@ -863,6 +872,11 @@ public final class AzureBlobStorageTestAccount {
}
}
@Override
public void close() throws Exception {
cleanup();
}
public NativeAzureFileSystem getFileSystem() {
return fs;
}


@ -18,21 +18,26 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import org.apache.hadoop.fs.azure.AzureException;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestAzureConcurrentOutOfBandIo {
/**
* Handle OOB IO into a shared container.
*/
public class ITestAzureConcurrentOutOfBandIo extends AbstractWasbTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(ITestAzureConcurrentOutOfBandIo.class);
// Class constants.
static final int DOWNLOAD_BLOCK_SIZE = 8 * 1024 * 1024;
@ -42,22 +47,10 @@ public class TestAzureConcurrentOutOfBandIo {
// Number of blocks to be written before flush.
static final int NUMBER_OF_BLOCKS = 2;
protected AzureBlobStorageTestAccount testAccount;
// Overridden TestCase methods.
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.createOutOfBandStore(
UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE);
assumeNotNull(testAccount);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
}
class DataBlockWriter implements Runnable {
@ -119,13 +112,11 @@ public class TestAzureConcurrentOutOfBandIo {
outputStream.close();
}
} catch (AzureException e) {
System.out
.println("DatablockWriter thread encountered a storage exception."
+ e.getMessage());
LOG.error("DatablockWriter thread encountered a storage exception."
+ e.getMessage(), e);
} catch (IOException e) {
System.out
.println("DatablockWriter thread encountered an I/O exception."
+ e.getMessage());
LOG.error("DatablockWriter thread encountered an I/O exception."
+ e.getMessage(), e);
}
}
}
@ -140,10 +131,11 @@ public class TestAzureConcurrentOutOfBandIo {
//
// Write five 4 MB blocks to the blob. To ensure there is data in the blob before
// reading. This eliminates the race between the reader and writer threads.
OutputStream outputStream = testAccount.getStore().storefile(
"WASB_String.txt",
String key = "WASB_String" + AzureTestUtils.getForkID() + ".txt";
OutputStream outputStream = testAccount.getStore().storefile(
key,
new PermissionStatus("", "", FsPermission.getDefault()),
"WASB_String.txt");
key);
Arrays.fill(dataBlockWrite, (byte) 255);
for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
outputStream.write(dataBlockWrite);
@ -153,15 +145,12 @@ public class TestAzureConcurrentOutOfBandIo {
outputStream.close();
// Start writing blocks to Azure store using the DataBlockWriter thread.
DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount,
"WASB_String.txt");
writeBlockTask.startWriting();
DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount, key);
writeBlockTask.startWriting();
int count = 0;
InputStream inputStream = null;
for (int i = 0; i < 5; i++) {
try {
inputStream = testAccount.getStore().retrieve("WASB_String.txt");
try(InputStream inputStream = testAccount.getStore().retrieve(key)) {
count = 0;
int c = 0;
@ -179,11 +168,6 @@ public class TestAzureConcurrentOutOfBandIo {
e.printStackTrace();
fail();
}
// Close the stream.
if (null != inputStream){
inputStream.close();
}
}
// Stop writing blocks.
@ -192,4 +176,4 @@ public class TestAzureConcurrentOutOfBandIo {
// Validate that a block was read.
assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
}
}
}


@ -0,0 +1,33 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
/**
* Extends ITestAzureConcurrentOutOfBandIo in order to run testReadOOBWrites with secure mode
* (fs.azure.secure.mode) both enabled and disabled.
*/
public class ITestAzureConcurrentOutOfBandIoWithSecureMode
extends ITestAzureConcurrentOutOfBandIo {
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.createOutOfBandStore(
UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE, true);
}
}


@ -18,12 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@ -31,34 +25,41 @@ import java.net.HttpURLConnection;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.concurrent.Callable;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.junit.Assume.assumeNotNull;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent;
public class TestAzureFileSystemErrorConditions {
/**
* Error handling.
*/
public class ITestAzureFileSystemErrorConditions extends
AbstractWasbTestWithTimeout {
private static final int ALL_THREE_FILE_SIZE = 1024;
@Test
public void testNoInitialize() throws Exception {
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
boolean passed = false;
try {
store.retrieveMetadata("foo");
passed = true;
} catch (AssertionError e) {
}
assertFalse(
"Doing an operation on the store should throw if not initalized.",
passed);
intercept(AssertionError.class,
new Callable<FileMetadata>() {
@Override
public FileMetadata call() throws Exception {
return new AzureNativeFileSystemStore()
.retrieveMetadata("foo");
}
});
}
/**
@ -89,8 +90,7 @@ public class TestAzureFileSystemErrorConditions {
AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
MockStorageInterface mockStorage = new MockStorageInterface();
store.setAzureStorageInteractionLayer(mockStorage);
FileSystem fs = new NativeAzureFileSystem(store);
try {
try (FileSystem fs = new NativeAzureFileSystem(store)) {
Configuration conf = new Configuration();
AzureBlobStorageTestAccount.setMockAccountKey(conf);
HashMap<String, String> metadata = new HashMap<String, String>();
@ -99,19 +99,17 @@ public class TestAzureFileSystemErrorConditions {
mockStorage.addPreExistingContainer(
AzureBlobStorageTestAccount.getMockContainerUri(), metadata);
boolean passed = false;
try {
fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
fs.listStatus(new Path("/"));
passed = true;
} catch (AzureException ex) {
assertTrue("Unexpected exception message: " + ex,
ex.getMessage().contains("unsupported version: 2090-04-05."));
}
assertFalse("Should've thrown an exception because of the wrong version.",
passed);
} finally {
fs.close();
AzureException ex = intercept(AzureException.class,
new Callable<FileStatus[]>() {
@Override
public FileStatus[] call() throws Exception {
fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI),
conf);
return fs.listStatus(new Path("/"));
}
});
GenericTestUtils.assertExceptionContains(
"unsupported version: 2090-04-05.", ex);
}
}
@ -120,7 +118,7 @@ public class TestAzureFileSystemErrorConditions {
}
private class TransientErrorInjector extends StorageEvent<SendingRequestEvent> {
final ConnectionRecognizer connectionRecognizer;
private final ConnectionRecognizer connectionRecognizer;
private boolean injectedErrorOnce = false;
public TransientErrorInjector(ConnectionRecognizer connectionRecognizer) {
@ -129,7 +127,8 @@ public class TestAzureFileSystemErrorConditions {
@Override
public void eventOccurred(SendingRequestEvent eventArg) {
HttpURLConnection connection = (HttpURLConnection)eventArg.getConnectionObject();
HttpURLConnection connection
= (HttpURLConnection) eventArg.getConnectionObject();
if (!connectionRecognizer.isTargetConnection(connection)) {
return;
}
@ -178,10 +177,10 @@ public class TestAzureFileSystemErrorConditions {
private void writeAllThreeFile(NativeAzureFileSystem fs, Path testFile)
throws IOException {
byte[] buffer = new byte[ALL_THREE_FILE_SIZE];
Arrays.fill(buffer, (byte)3);
OutputStream stream = fs.create(testFile);
stream.write(buffer);
stream.close();
Arrays.fill(buffer, (byte) 3);
try(OutputStream stream = fs.create(testFile)) {
stream.write(buffer);
}
}
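The try-with-resources form guarantees the stream is closed even when write() throws, which the earlier explicit close() did not. A minimal illustration of the pattern (writeBytes is a hypothetical helper, not part of this class):

    // Illustrative only: close() runs on both the normal and the
    // exceptional path, with no finally block needed.
    private void writeBytes(FileSystem fs, Path path, byte[] data)
        throws IOException {
      try (OutputStream stream = fs.create(path)) {
        stream.write(data);   // if write() throws, the stream still closes
      }
    }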
private void readAllThreeFile(NativeAzureFileSystem fs, Path testFile)

View File

@ -20,10 +20,6 @@ package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_CHECK_BLOCK_MD5;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_STORE_BLOB_MD5;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.ByteArrayInputStream;
@ -33,9 +29,12 @@ import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.util.Arrays;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.junit.After;
import org.junit.Test;
@ -54,15 +53,12 @@ import com.microsoft.azure.storage.core.Base64;
* Test that we do proper data integrity validation with MD5 checks as
* configured.
*/
public class TestBlobDataValidation {
public class ITestBlobDataValidation extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
testAccount = AzureTestUtils.cleanupTestAccount(testAccount);
}
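The one-line tearDown works because the helper is assumed to both clean up and return null, resetting the field in the same statement; a sketch of that contract (the real method lives in AzureTestUtils, added elsewhere in this commit):

    // Assumed shape: clean up if non-null, always return null so callers
    // can write `testAccount = cleanupTestAccount(testAccount);`.
    public static AzureBlobStorageTestAccount cleanupTestAccount(
        AzureBlobStorageTestAccount testAccount) throws Exception {
      if (testAccount != null) {
        testAccount.cleanup();
      }
      return null;
    }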
/**
@ -86,12 +82,23 @@ public class TestBlobDataValidation {
testStoreBlobMd5(true);
}
/**
* Trims a suffix/prefix from the given string. For example, if
* s is given as "/xy" and toTrim is "/", this method returns "xy"
*/
private static String trim(String s, String toTrim) {
return StringUtils.removeEnd(StringUtils.removeStart(s, toTrim),
toTrim);
}
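Given the StringUtils.removeStart/removeEnd semantics, the helper behaves as follows (illustrative assertions only):

    assertEquals("xy", trim("/xy", "/"));    // leading separator removed
    assertEquals("xy", trim("xy/", "/"));    // trailing separator removed
    assertEquals("xy", trim("/xy/", "/"));   // both removed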
private void testStoreBlobMd5(boolean expectMd5Stored) throws Exception {
assumeNotNull(testAccount);
// Write a test file.
String testFileKey = "testFile";
Path testFilePath = new Path("/" + testFileKey);
OutputStream outStream = testAccount.getFileSystem().create(testFilePath);
NativeAzureFileSystem fs = testAccount.getFileSystem();
Path testFilePath = AzureTestUtils.pathForTests(fs,
methodName.getMethodName());
String testFileKey = trim(testFilePath.toUri().getPath(), "/");
OutputStream outStream = fs.create(testFilePath);
outStream.write(new byte[] { 5, 15 });
outStream.close();
@ -114,7 +121,7 @@ public class TestBlobDataValidation {
// Now read back the content. If we stored the MD5 for the blob content
// we should get a data corruption error.
InputStream inStream = testAccount.getFileSystem().open(testFilePath);
InputStream inStream = fs.open(testFilePath);
try {
byte[] inBuf = new byte[100];
while (inStream.read(inBuf) > 0){

View File

@ -18,23 +18,31 @@
package org.apache.hadoop.fs.azure;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
import junit.framework.*;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Date;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
/**
* A simple benchmark to find out the difference in speed between block
* and page blobs.
*/
public class TestBlobTypeSpeedDifference extends TestCase {
public class ITestBlobTypeSpeedDifference extends AbstractWasbTestBase {
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
/**
* Writes data to the given stream of the given size, flushing every
* x bytes.
@ -101,8 +109,10 @@ public class TestBlobTypeSpeedDifference extends TestCase {
*/
private static TestResult writePageBlobTestFile(NativeAzureFileSystem fs,
long size, long flushInterval) throws IOException {
Path testFile = AzureTestUtils.blobPathForTests(fs,
"writePageBlobTestFile");
return writeTestFile(fs,
AzureBlobStorageTestAccount.pageBlobPath("pageBlob"),
testFile,
size, flushInterval);
}
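At its core the benchmark is a write loop that flushes every flushInterval bytes and times the result; a hypothetical equivalent of that loop (the name and single-byte writes are assumptions, not the class's actual writeTestFile):

    // Hypothetical flush-every-N-bytes writer, timed with System.nanoTime().
    private static long timedWrite(OutputStream out, long size,
        long flushInterval) throws IOException {
      long start = System.nanoTime();
      for (long written = 0; written < size; written++) {
        out.write(0);
        if ((written + 1) % flushInterval == 0) {
          out.flush();
        }
      }
      out.flush();
      return System.nanoTime() - start;   // elapsed nanoseconds
    }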
@ -111,16 +121,7 @@ public class TestBlobTypeSpeedDifference extends TestCase {
*/
@Test
public void testTenKbFileFrequentFlush() throws Exception {
AzureBlobStorageTestAccount testAccount =
AzureBlobStorageTestAccount.create();
if (testAccount == null) {
return;
}
try {
testForSizeAndFlushInterval(testAccount.getFileSystem(), 10 * 1000, 500);
} finally {
testAccount.cleanup();
}
testForSizeAndFlushInterval(getFileSystem(), 10 * 1000, 500);
}
/**
@ -144,7 +145,7 @@ public class TestBlobTypeSpeedDifference extends TestCase {
* Runs the benchmark for the given file size and flush frequency from the
* command line.
*/
public static void main(String argv[]) throws Exception {
public static void main(String[] argv) throws Exception {
Configuration conf = new Configuration();
long size = 10 * 1000 * 1000;
long flushInterval = 2000;
@ -154,7 +155,9 @@ public class TestBlobTypeSpeedDifference extends TestCase {
if (argv.length > 1) {
flushInterval = Long.parseLong(argv[1]);
}
testForSizeAndFlushInterval((NativeAzureFileSystem)FileSystem.get(conf),
size, flushInterval);
testForSizeAndFlushInterval(
(NativeAzureFileSystem) FileSystem.get(conf),
size,
flushInterval);
}
}

View File

@ -26,9 +26,7 @@ import java.util.Random;
import java.util.concurrent.Callable;
import org.junit.FixMethodOrder;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.runners.MethodSorters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -40,13 +38,11 @@ import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import static org.apache.hadoop.test.LambdaTestUtils.*;
@ -58,9 +54,9 @@ import static org.apache.hadoop.test.LambdaTestUtils.*;
*/
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class TestBlockBlobInputStream extends AbstractWasbTestBase {
public class ITestBlockBlobInputStream extends AbstractAzureScaleTest {
private static final Logger LOG = LoggerFactory.getLogger(
TestBlockBlobInputStream.class);
ITestBlockBlobInputStream.class);
private static final int KILOBYTE = 1024;
private static final int MEGABYTE = KILOBYTE * KILOBYTE;
private static final int TEST_FILE_SIZE = 6 * MEGABYTE;
@ -71,11 +67,8 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase {
private AzureBlobStorageTestAccount accountUsingInputStreamV2;
private long testFileLength;
/**
* Long test timeout.
*/
@Rule
public Timeout testTimeout = new Timeout(10 * 60 * 1000);
private FileStatus testFileStatus;
private Path hugefile;
@ -867,9 +860,15 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase {
@Test
public void test_999_DeleteHugeFiles() throws IOException {
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
fs.delete(TEST_FILE_PATH, false);
timer.end("time to delete %s", TEST_FILE_PATH);
try {
NanoTimer timer = new NanoTimer();
NativeAzureFileSystem fs = getFileSystem();
fs.delete(TEST_FILE_PATH, false);
timer.end("time to delete %s", TEST_FILE_PATH);
} finally {
// clean up the test account
AzureTestUtils.cleanupTestAccount(accountUsingInputStreamV1);
}
}
}

View File

@ -18,17 +18,19 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import java.io.FileNotFoundException;
import java.util.EnumSet;
import java.util.concurrent.Callable;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
@ -41,15 +43,13 @@ import com.microsoft.azure.storage.blob.CloudBlockBlob;
/**
* Tests that WASB creates containers only if needed.
*/
public class TestContainerChecks {
public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private boolean runningInSASMode = false;
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
testAccount = AzureTestUtils.cleanup(testAccount);
}
@Before
@ -60,8 +60,7 @@ public class TestContainerChecks {
@Test
public void testContainerExistAfterDoesNotExist() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
testAccount = blobStorageTestAccount();
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
@ -93,10 +92,15 @@ public class TestContainerChecks {
assertTrue(container.exists());
}
protected AzureBlobStorageTestAccount blobStorageTestAccount()
throws Exception {
return AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
}
@Test
public void testContainerCreateAfterDoesNotExist() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
testAccount = blobStorageTestAccount();
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
@ -125,8 +129,7 @@ public class TestContainerChecks {
@Test
public void testContainerCreateOnWrite() throws Exception {
testAccount = AzureBlobStorageTestAccount.create("",
EnumSet.noneOf(CreateOptions.class));
testAccount = blobStorageTestAccount();
assumeNotNull(testAccount);
CloudBlobContainer container = testAccount.getRealContainer();
FileSystem fs = testAccount.getFileSystem();
@ -145,19 +148,25 @@ public class TestContainerChecks {
assertFalse(container.exists());
// Neither should a read.
try {
fs.open(new Path("/foo"));
assertFalse("Should've thrown.", true);
} catch (FileNotFoundException ex) {
}
Path foo = new Path("/testContainerCreateOnWrite-foo");
Path bar = new Path("/testContainerCreateOnWrite-bar");
LambdaTestUtils.intercept(FileNotFoundException.class,
new Callable<String>() {
@Override
public String call() throws Exception {
fs.open(foo).close();
return "Stream to " + foo;
}
}
);
assertFalse(container.exists());
// Neither should a rename
assertFalse(fs.rename(new Path("/foo"), new Path("/bar")));
assertFalse(fs.rename(foo, bar));
assertFalse(container.exists());
// But a write should.
assertTrue(fs.createNewFile(new Path("/foo")));
assertTrue(fs.createNewFile(foo));
assertTrue(container.exists());
}
@ -176,7 +185,7 @@ public class TestContainerChecks {
// A write should just fail
try {
fs.createNewFile(new Path("/foo"));
fs.createNewFile(new Path("/testContainerChecksWithSas-foo"));
assertFalse("Should've thrown.", true);
} catch (AzureException ex) {
}

View File

@ -24,25 +24,35 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
public class TestFileSystemOperationExceptionHandling
/**
* Single threaded exception handling.
*/
public class ITestFileSystemOperationExceptionHandling
extends AbstractWasbTestBase {
private FSDataInputStream inputStream = null;
private static Path testPath = new Path("testfile.dat");
private Path testPath;
private Path testFolderPath;
private static Path testFolderPath = new Path("testfolder");
@Override
public void setUp() throws Exception {
super.setUp();
testPath = path("testfile.dat");
testFolderPath = path("testfolder");
}
/*
/**
* Helper method that creates an InputStream to validate exceptions
* for various scenarios
* for various scenarios.
*/
private void setupInputStreamToTest(AzureBlobStorageTestAccount testAccount)
throws Exception {
@ -50,8 +60,9 @@ public class TestFileSystemOperationExceptionHandling
FileSystem fs = testAccount.getFileSystem();
// Step 1: Create a file and write dummy data.
Path testFilePath1 = new Path("test1.dat");
Path testFilePath2 = new Path("test2.dat");
Path base = methodPath();
Path testFilePath1 = new Path(base, "test1.dat");
Path testFilePath2 = new Path(base, "test2.dat");
FSDataOutputStream outputStream = fs.create(testFilePath1);
String testString = "This is a test string";
outputStream.write(testString.getBytes());
@ -64,28 +75,28 @@ public class TestFileSystemOperationExceptionHandling
fs.rename(testFilePath1, testFilePath2);
}
/*
/**
* Tests a basic single threaded read scenario for Page blobs.
*/
@Test(expected=FileNotFoundException.class)
public void testSingleThreadedPageBlobReadScenario() throws Throwable {
AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
setupInputStreamToTest(testAccount);
byte[] readBuffer = new byte[512];
inputStream.read(readBuffer);
}
/*
/**
* Tests a basic single threaded seek scenario for Page blobs.
*/
@Test(expected=FileNotFoundException.class)
public void testSingleThreadedPageBlobSeekScenario() throws Throwable {
AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
setupInputStreamToTest(testAccount);
inputStream.seek(5);
}
/*
/**
* Test a basic single thread seek scenario for Block blobs.
*/
@Test(expected=FileNotFoundException.class)
@ -97,7 +108,7 @@ public class TestFileSystemOperationExceptionHandling
inputStream.read();
}
/*
/**
* Tests a basic single threaded read scenario for Block blobs.
*/
@Test(expected=FileNotFoundException.class)
@ -108,144 +119,147 @@ public class TestFileSystemOperationExceptionHandling
inputStream.read(readBuffer);
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic single threaded setPermission scenario
/**
* Tests basic single threaded setPermission scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedBlockBlobSetPermissionScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
createEmptyFile(createTestAccount(), testPath);
fs.delete(testPath, true);
fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
fs.setPermission(testPath,
new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic single threaded setPermission scenario
/**
* Tests basic single threaded setPermission scenario.
*/
public void testSingleThreadedPageBlobSetPermissionScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
testPath);
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedPageBlobSetPermissionScenario()
throws Throwable {
createEmptyFile(getPageBlobTestStorageAccount(), testPath);
fs.delete(testPath, true);
fs.setOwner(testPath, "testowner", "testgroup");
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic single threaded setPermission scenario
/**
* Tests basic single threaded setPermission scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedBlockBlobSetOwnerScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
createEmptyFile(createTestAccount(), testPath);
fs.delete(testPath, true);
fs.setOwner(testPath, "testowner", "testgroup");
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic single threaded setPermission scenario
/**
* Tests basic single threaded setPermission scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedPageBlobSetOwnerScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createEmptyFile(getPageBlobTestStorageAccount(),
testPath);
fs.delete(testPath, true);
fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
fs.setPermission(testPath,
new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
}
@Test(expected=FileNotFoundException.class)
/*
* Test basic single threaded listStatus scenario
/**
* Test basic single threaded listStatus scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedBlockBlobListStatusScenario() throws Throwable {
ExceptionHandlingTestHelper.createTestFolder(createTestAccount(), testFolderPath);
fs.delete(testFolderPath, true);
fs.listStatus(testFolderPath);
}
@Test(expected=FileNotFoundException.class)
/*
* Test basic single threaded listStatus scenario
*/
public void testSingleThreadedPageBlobListStatusScenario() throws Throwable {
ExceptionHandlingTestHelper.createTestFolder(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createTestFolder(createTestAccount(),
testFolderPath);
fs.delete(testFolderPath, true);
fs.listStatus(testFolderPath);
}
@Test
/*
* Test basic single threaded listStatus scenario
/**
* Test basic single threaded listStatus scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedPageBlobListStatusScenario() throws Throwable {
createTestFolder(getPageBlobTestStorageAccount(),
testFolderPath);
fs.delete(testFolderPath, true);
fs.listStatus(testFolderPath);
}
/**
* Test basic single threaded rename scenario.
*/
@Test
public void testSingleThreadedBlockBlobRenameScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
createEmptyFile(createTestAccount(),
testPath);
Path dstPath = new Path("dstFile.dat");
fs.delete(testPath, true);
boolean renameResult = fs.rename(testPath, dstPath);
Assert.assertFalse(renameResult);
assertFalse(renameResult);
}
@Test
/*
* Test basic single threaded listStatus scenario
/**
* Test basic single threaded rename scenario.
*/
@Test
public void testSingleThreadedPageBlobRenameScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createEmptyFile(getPageBlobTestStorageAccount(),
testPath);
Path dstPath = new Path("dstFile.dat");
fs.delete(testPath, true);
boolean renameResult = fs.rename(testPath, dstPath);
Assert.assertFalse(renameResult);
assertFalse(renameResult);
}
@Test
/*
* Test basic single threaded listStatus scenario
/**
* Test basic single threaded delete scenario.
*/
@Test
public void testSingleThreadedBlockBlobDeleteScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
createEmptyFile(createTestAccount(),
testPath);
fs.delete(testPath, true);
boolean deleteResult = fs.delete(testPath, true);
Assert.assertFalse(deleteResult);
assertFalse(deleteResult);
}
@Test
/*
* Test basic single threaded listStatus scenario
/**
* Test basic single threaded delete scenario.
*/
@Test
public void testSingleThreadedPageBlobDeleteScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createEmptyFile(getPageBlobTestStorageAccount(),
testPath);
fs.delete(testPath, true);
boolean deleteResult = fs.delete(testPath, true);
Assert.assertFalse(deleteResult);
assertFalse(deleteResult);
}
@Test(expected=FileNotFoundException.class)
/*
* Test basic single threaded listStatus scenario
/**
* Test basic single threaded open scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedBlockBlobOpenScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
createEmptyFile(createTestAccount(),
testPath);
fs.delete(testPath, true);
inputStream = fs.open(testPath);
}
@Test(expected=FileNotFoundException.class)
/*
* Test basic single threaded listStatus scenario
/**
* Test delete then open a file.
*/
@Test(expected = FileNotFoundException.class)
public void testSingleThreadedPageBlobOpenScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createEmptyFile(getPageBlobTestStorageAccount(),
testPath);
fs.delete(testPath, true);
inputStream = fs.open(testPath);
@ -257,13 +271,13 @@ public class TestFileSystemOperationExceptionHandling
inputStream.close();
}
if (fs != null && fs.exists(testPath)) {
fs.delete(testPath, true);
}
ContractTestUtils.rm(fs, testPath, true, true);
super.tearDown();
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
protected AzureBlobStorageTestAccount createTestAccount()
throws Exception {
return AzureBlobStorageTestAccount.create();
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -17,63 +17,63 @@
*/
package org.apache.hadoop.fs.azure;
import java.net.URI;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import com.microsoft.azure.storage.CloudStorageAccount;
import org.junit.Test;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
/**
* Test for error messages coming from SDK.
*/
public class ITestFileSystemOperationExceptionMessage
extends AbstractWasbTestWithTimeout {
public class TestFileSystemOperationExceptionMessage extends
NativeAzureFileSystemBaseTest {
@Test
public void testAnonymouseCredentialExceptionMessage() throws Throwable{
public void testAnonymouseCredentialExceptionMessage() throws Throwable {
Configuration conf = AzureBlobStorageTestAccount.createTestConfiguration();
CloudStorageAccount account =
AzureBlobStorageTestAccount.createTestAccount(conf);
AzureTestUtils.assume("No test account", account != null);
String testStorageAccount = conf.get("fs.azure.test.account.name");
conf = new Configuration();
conf.set("fs.AbstractFileSystem.wasb.impl", "org.apache.hadoop.fs.azure.Wasb");
conf.set("fs.AbstractFileSystem.wasb.impl",
"org.apache.hadoop.fs.azure.Wasb");
conf.set("fs.azure.skip.metrics", "true");
String testContainer = UUID.randomUUID().toString();
String wasbUri = String.format("wasb://%s@%s",
testContainer, testStorageAccount);
fs = new NativeAzureFileSystem();
try {
fs.initialize(new URI(wasbUri), conf);
try(NativeAzureFileSystem filesystem = new NativeAzureFileSystem()) {
filesystem.initialize(new URI(wasbUri), conf);
fail("Expected an exception, got " + filesystem);
} catch (Exception ex) {
Throwable innerException = ex.getCause();
while (innerException != null
&& !(innerException instanceof AzureException)) {
&& !(innerException instanceof AzureException)) {
innerException = innerException.getCause();
}
if (innerException != null) {
String exceptionMessage = innerException.getMessage();
if (exceptionMessage == null
|| exceptionMessage.length() == 0) {
Assert.fail();}
else {
GenericTestUtils.assertExceptionContains(String.format(
NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
ex);
}
GenericTestUtils.assertExceptionContains(String.format(
NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
ex);
} else {
Assert.fail();
fail("No inner azure exception");
}
}
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
}
}

View File

@ -20,28 +20,56 @@ package org.apache.hadoop.fs.azure;
import java.io.FileNotFoundException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.After;
import org.junit.Test;
public class TestFileSystemOperationsExceptionHandlingMultiThreaded
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*;
/**
* Multithreaded operations on FS, verifying that failures are as expected.
*/
public class ITestFileSystemOperationsExceptionHandlingMultiThreaded
extends AbstractWasbTestBase {
FSDataInputStream inputStream = null;
private static Path testPath = new Path("testfile.dat");
private static Path testFolderPath = new Path("testfolder");
private Path testPath;
private Path testFolderPath;
@Override
public void setUp() throws Exception {
super.setUp();
testPath = path("testfile.dat");
testFolderPath = path("testfolder");
}
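Deriving every test path from the current method name is what lets the parallel-tests profile run these cases concurrently against a single container without collisions on shared names like "testfile.dat". A sketch of the assumed helper behind methodPath() (the real implementation lives in the test base class):

    // Sketch: one path per test method, from the JUnit TestName rule.
    protected Path methodPath() throws IOException {
      return path(methodName.getMethodName());
    }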
/*
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
@Override
public void tearDown() throws Exception {
IOUtils.closeStream(inputStream);
ContractTestUtils.rm(fs, testPath, true, false);
ContractTestUtils.rm(fs, testFolderPath, true, false);
super.tearDown();
}
/**
* Helper method to create an input stream to test various scenarios.
*/
private void getInputStreamToTest(FileSystem fs, Path testPath) throws Throwable {
private void getInputStreamToTest(FileSystem fs, Path testPath)
throws Throwable {
FSDataOutputStream outputStream = fs.create(testPath);
String testString = "This is a test string";
@ -51,19 +79,21 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
inputStream = fs.open(testPath);
}
/*
/**
* Test to validate correct exception is thrown for Multithreaded read
* scenario for block blobs
* scenario for block blobs.
*/
@Test(expected=FileNotFoundException.class)
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
AzureBlobStorageTestAccount testAccount = createTestAccount();
fs = testAccount.getFileSystem();
Path testFilePath1 = new Path("test1.dat");
NativeAzureFileSystem fs = testAccount.getFileSystem();
Path base = methodPath();
Path testFilePath1 = new Path(base, "test1.dat");
Path renamePath = new Path(base, "test2.dat");
getInputStreamToTest(fs, testFilePath1);
Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
Thread renameThread = new Thread(
new RenameThread(fs, testFilePath1, renamePath));
renameThread.start();
renameThread.join();
@ -72,20 +102,24 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
inputStream.read(readBuffer);
}
/*
/**
* Test to validate correct exception is thrown for Multithreaded seek
* scenario for block blobs
* scenario for block blobs.
*/
@Test(expected=FileNotFoundException.class)
@Test(expected = FileNotFoundException.class)
public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
/*
AzureBlobStorageTestAccount testAccount = createTestAccount();
fs = testAccount.getFileSystem();
Path testFilePath1 = new Path("test1.dat");
*/
Path base = methodPath();
Path testFilePath1 = new Path(base, "test1.dat");
Path renamePath = new Path(base, "test2.dat");
getInputStreamToTest(fs, testFilePath1);
Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
Thread renameThread = new Thread(
new RenameThread(fs, testFilePath1, renamePath));
renameThread.start();
renameThread.join();
@ -94,43 +128,50 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
inputStream.read();
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded setPermission scenario
/**
* Tests basic multi threaded setPermission scenario.
*/
public void testMultiThreadedPageBlobSetPermissionScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedPageBlobSetPermissionScenario()
throws Throwable {
createEmptyFile(
getPageBlobTestStorageAccount(),
testPath);
Thread t = new Thread(new DeleteThread(fs, testPath));
t.start();
while (t.isAlive()) {
fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
fs.setPermission(testPath,
new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
}
fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
fs.setPermission(testPath,
new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded setPermission scenario
/**
* Tests basic multi threaded setPermission scenario.
*/
public void testMultiThreadedBlockBlobSetPermissionScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedBlockBlobSetPermissionScenario()
throws Throwable {
createEmptyFile(createTestAccount(),
testPath);
Thread t = new Thread(new DeleteThread(fs, testPath));
t.start();
while (t.isAlive()) {
fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
fs.setPermission(testPath,
new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
}
fs.setPermission(testPath, new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
fs.setPermission(testPath,
new FsPermission(FsAction.EXECUTE, FsAction.READ, FsAction.READ));
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded setPermission scenario
/**
* Tests basic multi threaded open scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedPageBlobOpenScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(),
createEmptyFile(createTestAccount(),
testPath);
Thread t = new Thread(new DeleteThread(fs, testPath));
t.start();
@ -143,13 +184,14 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
inputStream.close();
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded setPermission scenario
/**
* Tests basic multi threaded open scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedBlockBlobOpenScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createEmptyFile(
getPageBlobTestStorageAccount(),
testPath);
Thread t = new Thread(new DeleteThread(fs, testPath));
t.start();
@ -162,13 +204,13 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
inputStream.close();
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded setOwner scenario
/**
* Tests basic multi threaded setOwner scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(createTestAccount(), testPath);
createEmptyFile(createTestAccount(), testPath);
Thread t = new Thread(new DeleteThread(fs, testPath));
t.start();
while (t.isAlive()) {
@ -177,12 +219,13 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
fs.setOwner(testPath, "testowner", "testgroup");
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded setOwner scenario
/**
* Tests basic multi threaded setOwner scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable {
ExceptionHandlingTestHelper.createEmptyFile(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createEmptyFile(
getPageBlobTestStorageAccount(),
testPath);
Thread t = new Thread(new DeleteThread(fs, testPath));
t.start();
@ -192,28 +235,13 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
fs.setOwner(testPath, "testowner", "testgroup");
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded listStatus scenario
/**
* Tests basic multi threaded listStatus scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable {
ExceptionHandlingTestHelper.createTestFolder(createTestAccount(), testFolderPath);
Thread t = new Thread(new DeleteThread(fs, testFolderPath));
t.start();
while (t.isAlive()) {
fs.listStatus(testFolderPath);
}
fs.listStatus(testFolderPath);
}
@Test(expected=FileNotFoundException.class)
/*
* Tests basic multi threaded listStatus scenario
*/
public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
ExceptionHandlingTestHelper.createTestFolder(ExceptionHandlingTestHelper.getPageBlobTestStorageAccount(),
createTestFolder(createTestAccount(),
testFolderPath);
Thread t = new Thread(new DeleteThread(fs, testFolderPath));
t.start();
@ -223,20 +251,38 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
fs.listStatus(testFolderPath);
}
/*
* Test to validate correct exception is thrown for Multithreaded read
* scenario for page blobs
/**
* Tests basic multi threaded listStatus scenario.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedPageBlobListStatusScenario() throws Throwable {
@Test(expected=FileNotFoundException.class)
createTestFolder(
getPageBlobTestStorageAccount(),
testFolderPath);
Thread t = new Thread(new DeleteThread(fs, testFolderPath));
t.start();
while (t.isAlive()) {
fs.listStatus(testFolderPath);
}
fs.listStatus(testFolderPath);
}
/**
* Test to validate correct exception is thrown for Multithreaded read
* scenario for page blobs.
*/
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedPageBlobReadScenario() throws Throwable {
AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
fs = testAccount.getFileSystem();
Path testFilePath1 = new Path("test1.dat");
bindToTestAccount(getPageBlobTestStorageAccount());
Path base = methodPath();
Path testFilePath1 = new Path(base, "test1.dat");
Path renamePath = new Path(base, "test2.dat");
getInputStreamToTest(fs, testFilePath1);
Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
Thread renameThread = new Thread(
new RenameThread(fs, testFilePath1, renamePath));
renameThread.start();
renameThread.join();
@ -244,87 +290,77 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
inputStream.read(readBuffer);
}
/*
/**
* Test to validate correct exception is thrown for Multithreaded seek
* scenario for page blobs
* scenario for page blobs.
*/
@Test(expected=FileNotFoundException.class)
@Test(expected = FileNotFoundException.class)
public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
AzureBlobStorageTestAccount testAccount = ExceptionHandlingTestHelper.getPageBlobTestStorageAccount();
fs = testAccount.getFileSystem();
Path testFilePath1 = new Path("test1.dat");
bindToTestAccount(getPageBlobTestStorageAccount());
Path base = methodPath();
Path testFilePath1 = new Path(base, "test1.dat");
Path renamePath = new Path(base, "test2.dat");
getInputStreamToTest(fs, testFilePath1);
Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
Thread renameThread = new Thread(
new RenameThread(fs, testFilePath1, renamePath));
renameThread.start();
renameThread.join();
inputStream.seek(5);
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
@After
public void tearDown() throws Exception {
/**
* Helper thread that just renames the test file.
*/
private static class RenameThread implements Runnable {
if (inputStream != null) {
inputStream.close();
private final FileSystem fs;
private final Path testPath;
private final Path renamePath;
RenameThread(FileSystem fs,
Path testPath,
Path renamePath) {
this.fs = fs;
this.testPath = testPath;
this.renamePath = renamePath;
}
if (fs != null && fs.exists(testPath)) {
fs.delete(testPath, true);
}
}
}
/*
* Helper thread that just renames the test file.
*/
class RenameThread implements Runnable {
private FileSystem fs;
private Path testPath;
private Path renamePath = new Path("test2.dat");
public RenameThread(FileSystem fs, Path testPath) {
this.fs = fs;
this.testPath = testPath;
}
@Override
public void run(){
try {
fs.rename(testPath, renamePath);
}catch (Exception e) {
// Swallowing the exception as the
// correctness of the test is controlled
// by the other thread
}
}
}
class DeleteThread implements Runnable {
private FileSystem fs;
private Path testPath;
public DeleteThread(FileSystem fs, Path testPath) {
this.fs = fs;
this.testPath = testPath;
}
@Override
public void run() {
try {
fs.delete(testPath, true);
} catch (Exception e) {
// Swallowing the exception as the
// correctness of the test is controlled
// by the other thread
@Override
public void run() {
try {
fs.rename(testPath, renamePath);
} catch (Exception e) {
// Swallowing the exception as the
// correctness of the test is controlled
// by the other thread
}
}
}
private static class DeleteThread implements Runnable {
private final FileSystem fs;
private final Path testPath;
DeleteThread(FileSystem fs, Path testPath) {
this.fs = fs;
this.testPath = testPath;
}
@Override
public void run() {
try {
fs.delete(testPath, true);
} catch (Exception e) {
// Swallowing the exception as the
// correctness of the test is controlled
// by the other thread
}
}
}
}

View File

@ -45,7 +45,7 @@ import org.mockito.stubbing.Answer;
/**
* Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
*/
public class TestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
public class ITestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
private final int renameThreads = 10;
private final int deleteThreads = 20;

View File

@ -26,19 +26,15 @@ import static org.apache.hadoop.fs.azure.SecureStorageInterfaceImpl.KEY_USE_CONT
* Test class to hold all WASB authorization tests that use blob-specific keys
* to access storage.
*/
public class TestNativeAzureFSAuthWithBlobSpecificKeys
extends TestNativeAzureFileSystemAuthorizationWithOwner {
public class ITestNativeAzureFSAuthWithBlobSpecificKeys
extends ITestNativeAzureFileSystemAuthorizationWithOwner {
@Override
public Configuration getConfiguration() {
Configuration conf = super.getConfiguration();
public Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
return conf;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
Configuration conf = getConfiguration();
return AzureBlobStorageTestAccount.create(conf);
}
}

View File

@ -19,7 +19,6 @@
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Test;
import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE;
@ -27,34 +26,28 @@ import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACH
/**
* Test class to hold all WASB authorization caching related tests.
*/
public class TestNativeAzureFSAuthorizationCaching
extends TestNativeAzureFileSystemAuthorizationWithOwner {
public class ITestNativeAzureFSAuthorizationCaching
extends ITestNativeAzureFileSystemAuthorizationWithOwner {
private static final int DUMMY_TTL_VALUE = 5000;
@Override
public Configuration getConfiguration() {
Configuration conf = super.getConfiguration();
public Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.set(KEY_AUTH_SERVICE_CACHING_ENABLE, "true");
return conf;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
Configuration conf = getConfiguration();
return AzureBlobStorageTestAccount.create(conf);
}
/**
* Test to verify cache behavior -- assert that PUT overwrites value if present
*/
@Test
public void testCachePut() throws Throwable {
CachingAuthorizer<String, Integer> cache = new CachingAuthorizer<>(DUMMY_TTL_VALUE, "TEST");
cache.init(getConfiguration());
cache.init(createConfiguration());
cache.put("TEST", 1);
cache.put("TEST", 3);
int result = cache.get("TEST");
ContractTestUtils.assertTrue("Cache returned unexpected result", result == 3);
assertEquals("Cache returned unexpected result", 3, result);
}
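A natural companion check, hypothetical here since it assumes get() returns null once the TTL lapses, would exercise DUMMY_TTL_VALUE directly:

    // Hypothetical expiry check: the entry should be gone after the TTL.
    cache.put("TEST", 1);
    Thread.sleep(DUMMY_TTL_VALUE + 1000);
    assertNull("Cache entry should have expired", cache.get("TEST"));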
}
}

View File

@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration;
* operations on page blob files and folders work as expected.
* These operations include create, delete, rename, list, and so on.
*/
public class TestNativeAzureFSPageBlobLive extends
public class ITestNativeAzureFSPageBlobLive extends
NativeAzureFileSystemBaseTest {
@Override

View File

@ -28,26 +28,34 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
/**
* Test append operations.
*/
public class ITestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
private static final String TEST_FILE = "test.dat";
private static final Path TEST_PATH = new Path(TEST_FILE);
private Path testPath;
private AzureBlobStorageTestAccount testAccount = null;
@Override
public Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME,
true);
return conf;
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
testAccount = createTestAccount();
fs = testAccount.getFileSystem();
Configuration conf = fs.getConf();
conf.setBoolean(NativeAzureFileSystem.APPEND_SUPPORT_ENABLE_PROPERTY_NAME, true);
URI uri = fs.getUri();
fs.initialize(uri, conf);
testPath = methodPath();
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create(createConfiguration());
}
/*
@ -63,9 +71,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
// Helper method to create file and write fileSize bytes of data on it.
private byte[] createBaseFileWithData(int fileSize, Path testPath) throws Throwable {
FSDataOutputStream createStream = null;
try {
createStream = fs.create(testPath);
try(FSDataOutputStream createStream = fs.create(testPath)) {
byte[] fileData = null;
if (fileSize != 0) {
@ -73,10 +79,6 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
createStream.write(fileData);
}
return fileData;
} finally {
if (createStream != null) {
createStream.close();
}
}
}
@ -116,10 +118,8 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
*/
private boolean verifyAppend(byte[] testData, Path testFile) {
FSDataInputStream srcStream = null;
try {
try(FSDataInputStream srcStream = fs.open(testFile)) {
srcStream = fs.open(testFile);
int baseBufferSize = 2048;
int testDataSize = testData.length;
int testDataIndex = 0;
@ -140,14 +140,6 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
return true;
} catch(Exception ex) {
return false;
} finally {
if (srcStream != null) {
try {
srcStream.close();
} catch(IOException ioe) {
// Swallowing
}
}
}
}
@ -161,18 +153,18 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
FSDataOutputStream appendStream = null;
try {
int baseDataSize = 50;
byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
int appendDataSize = 20;
byte[] appendDataBuffer = getTestData(appendDataSize);
appendStream = fs.append(TEST_PATH, 10);
appendStream = fs.append(testPath, 10);
appendStream.write(appendDataBuffer);
appendStream.close();
byte[] testData = new byte[baseDataSize + appendDataSize];
System.arraycopy(baseDataBuffer, 0, testData, 0, baseDataSize);
System.arraycopy(appendDataBuffer, 0, testData, baseDataSize, appendDataSize);
Assert.assertTrue(verifyAppend(testData, TEST_PATH));
assertTrue(verifyAppend(testData, testPath));
} finally {
if (appendStream != null) {
appendStream.close();
@ -189,15 +181,15 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
FSDataOutputStream appendStream = null;
try {
createBaseFileWithData(0, TEST_PATH);
createBaseFileWithData(0, testPath);
int appendDataSize = 20;
byte[] appendDataBuffer = getTestData(appendDataSize);
appendStream = fs.append(TEST_PATH, 10);
appendStream = fs.append(testPath, 10);
appendStream.write(appendDataBuffer);
appendStream.close();
Assert.assertTrue(verifyAppend(appendDataBuffer, TEST_PATH));
assertTrue(verifyAppend(appendDataBuffer, testPath));
} finally {
if (appendStream != null) {
appendStream.close();
@ -215,11 +207,11 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
FSDataOutputStream appendStream2 = null;
IOException ioe = null;
try {
createBaseFileWithData(0, TEST_PATH);
appendStream1 = fs.append(TEST_PATH, 10);
createBaseFileWithData(0, testPath);
appendStream1 = fs.append(testPath, 10);
boolean encounteredException = false;
try {
appendStream2 = fs.append(TEST_PATH, 10);
appendStream2 = fs.append(testPath, 10);
} catch(IOException ex) {
encounteredException = true;
ioe = ex;
@ -227,7 +219,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
appendStream1.close();
Assert.assertTrue(encounteredException);
assertTrue(encounteredException);
GenericTestUtils.assertExceptionContains("Unable to set Append lease on the Blob", ioe);
} finally {
if (appendStream1 != null) {
@ -247,7 +239,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
public void testMultipleAppends() throws Throwable {
int baseDataSize = 50;
byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
int appendDataSize = 100;
int targetAppendCount = 50;
@ -264,7 +256,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
while (appendCount < targetAppendCount) {
byte[] appendDataBuffer = getTestData(appendDataSize);
appendStream = fs.append(TEST_PATH, 30);
appendStream = fs.append(testPath, 30);
appendStream.write(appendDataBuffer);
appendStream.close();
@ -273,7 +265,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
appendCount++;
}
Assert.assertTrue(verifyAppend(testData, TEST_PATH));
assertTrue(verifyAppend(testData, testPath));
} finally {
if (appendStream != null) {
@ -289,7 +281,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
public void testMultipleAppendsOnSameStream() throws Throwable {
int baseDataSize = 50;
byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, TEST_PATH);
byte[] baseDataBuffer = createBaseFileWithData(baseDataSize, testPath);
int appendDataSize = 100;
int targetAppendCount = 50;
byte[] testData = new byte[baseDataSize + (appendDataSize*targetAppendCount)];
@ -304,7 +296,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
while (appendCount < targetAppendCount) {
appendStream = fs.append(TEST_PATH, 50);
appendStream = fs.append(testPath, 50);
int singleAppendChunkSize = 20;
int appendRunSize = 0;
@ -323,7 +315,7 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
appendCount++;
}
Assert.assertTrue(verifyAppend(testData, TEST_PATH));
assertTrue(verifyAppend(testData, testPath));
} finally {
if (appendStream != null) {
appendStream.close();
@ -346,8 +338,8 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
FSDataOutputStream appendStream = null;
try {
createBaseFileWithData(0, TEST_PATH);
appendStream = fs.append(TEST_PATH, 10);
createBaseFileWithData(0, testPath);
appendStream = fs.append(testPath, 10);
} finally {
if (appendStream != null) {
appendStream.close();
@ -355,8 +347,4 @@ public class TestNativeAzureFileSystemAppend extends AbstractWasbTestBase {
}
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
}

View File

@ -22,24 +22,29 @@ import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestNativeAzureFileSystemAtomicRenameDirList
/**
* Test atomic renaming.
*/
public class ITestNativeAzureFileSystemAtomicRenameDirList
extends AbstractWasbTestBase {
private AzureBlobStorageTestAccount testAccount;
// HBase-site config controlling HBase root dir
private static final String HBASE_ROOT_DIR_CONF_STRING = "hbase.rootdir";
private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS = "wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS =
"wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
return testAccount;
return AzureBlobStorageTestAccount.create();
}
@Test
public void testAzureNativeStoreIsAtomicRenameKeyDoesNotThrowNPEOnInitializingWithNonDefaultURI () throws IOException {
NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
public void testAtomicRenameKeyDoesntNPEOnInitializingWithNonDefaultURI()
throws IOException {
NativeAzureFileSystem azureFs = fs;
AzureNativeFileSystemStore azureStore = azureFs.getStore();
Configuration conf = fs.getConf();
conf.set(HBASE_ROOT_DIR_CONF_STRING, HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS);

View File

@ -24,23 +24,23 @@ import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.junit.Before;
import static org.junit.Assert.assertEquals;
/**
* Test class that runs wasb authorization tests with owner check enabled.
*/
public class TestNativeAzureFileSystemAuthorizationWithOwner
public class ITestNativeAzureFileSystemAuthorizationWithOwner
extends TestNativeAzureFileSystemAuthorization {
@Before
public void beforeMethod() {
super.beforeMethod();
@Override
public void setUp() throws Exception {
super.setUp();
authorizer.init(fs.getConf(), true);
}
/**
* Test case when owner matches current user
* Test case when owner matches current user.
*/
@Test
public void testOwnerPermissionPositive() throws Throwable {
@ -71,7 +71,7 @@ public class TestNativeAzureFileSystemAuthorizationWithOwner
}
/**
* Negative test case for owner does not match current user
* Negative test case for owner does not match current user.
*/
@Test
public void testOwnerPermissionNegative() throws Throwable {
@ -108,7 +108,7 @@ public class TestNativeAzureFileSystemAuthorizationWithOwner
/**
* Test to verify that retrieving owner information does not
* throw when file/folder does not exist
* throw when file/folder does not exist.
*/
@Test
public void testRetrievingOwnerDoesNotFailWhenFileDoesNotExist() throws Throwable {

View File

@ -18,9 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.net.URI;
import java.util.StringTokenizer;
@ -36,12 +33,12 @@ import org.junit.Test;
* testing with Live Azure storage because Emulator does not have support for
* client-side logging.
*
* <I>Important: </I> Do not attempt to move off commons-logging.
* The tests will fail.
*/
public class TestNativeAzureFileSystemClientLogging
public class ITestNativeAzureFileSystemClientLogging
extends AbstractWasbTestBase {
private AzureBlobStorageTestAccount testAccount;
// Core-site config controlling Azure Storage Client logging
private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
@ -134,7 +131,6 @@ public class TestNativeAzureFileSystemClientLogging
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
return testAccount;
return AzureBlobStorageTestAccount.create();
}
}

View File

@ -35,11 +35,12 @@ import java.util.concurrent.Future;
/***
* Test class to hold all Live Azure storage concurrency tests.
*/
public class TestNativeAzureFileSystemConcurrencyLive
public class ITestNativeAzureFileSystemConcurrencyLive
extends AbstractWasbTestBase {
private static final int THREAD_COUNT = 102;
private static final int TEST_EXECUTION_TIMEOUT = 5000;
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
@ -53,7 +54,7 @@ public class TestNativeAzureFileSystemConcurrencyLive
*/
@Test(timeout = TEST_EXECUTION_TIMEOUT)
public void testConcurrentCreateDeleteFile() throws Exception {
Path testFile = new Path("test.dat");
Path testFile = methodPath();
List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
@ -128,57 +129,57 @@ public class TestNativeAzureFileSystemConcurrencyLive
}
}
}
}
abstract class FileSystemTask<V> implements Callable<V> {
private final FileSystem fileSystem;
private final Path path;
abstract class FileSystemTask<V> implements Callable<V> {
private final FileSystem fileSystem;
private final Path path;
protected FileSystem getFileSystem() {
return this.fileSystem;
protected FileSystem getFileSystem() {
return this.fileSystem;
}
protected Path getFilePath() {
return this.path;
}
FileSystemTask(FileSystem fs, Path p) {
this.fileSystem = fs;
this.path = p;
}
public abstract V call() throws Exception;
}
protected Path getFilePath() {
return this.path;
class DeleteFileTask extends FileSystemTask<Boolean> {
DeleteFileTask(FileSystem fs, Path p) {
super(fs, p);
}
@Override
public Boolean call() throws Exception {
return this.getFileSystem().delete(this.getFilePath(), false);
}
}
FileSystemTask(FileSystem fs, Path p) {
this.fileSystem = fs;
this.path = p;
}
class CreateFileTask extends FileSystemTask<Void> {
CreateFileTask(FileSystem fs, Path p) {
super(fs, p);
}
public abstract V call() throws Exception;
}
public Void call() throws Exception {
FileSystem fs = getFileSystem();
Path p = getFilePath();
class DeleteFileTask extends FileSystemTask<Boolean> {
// Create an empty file and close the stream.
FSDataOutputStream stream = fs.create(p, true);
stream.close();
DeleteFileTask(FileSystem fs, Path p) {
super(fs, p);
}
// Delete the file. We don't care if delete returns true or false.
// We just want to ensure the file does not exist.
this.getFileSystem().delete(this.getFilePath(), false);
@Override
public Boolean call() throws Exception {
return this.getFileSystem().delete(this.getFilePath(), false);
return null;
}
}
}
class CreateFileTask extends FileSystemTask<Void> {
CreateFileTask(FileSystem fs, Path p) {
super(fs, p);
}
public Void call() throws Exception {
FileSystem fs = getFileSystem();
Path p = getFilePath();
// Create an empty file and close the stream.
FSDataOutputStream stream = fs.create(p, true);
stream.close();
// Delete the file. We don't care if delete returns true or false.
// We just want to ensure the file does not exist.
this.getFileSystem().delete(this.getFilePath(), false);
return null;
}
}
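A minimal sketch, not part of the patch, of the executor pattern the concurrency test above drives: THREAD_COUNT callables submitted together, with invokeAll used to surface any per-task failure. Plain no-op callables stand in for the CreateFileTask/DeleteFileTask pairs.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ConcurrencySketch {
  public static void main(String[] args) throws Exception {
    final int threadCount = 102;                  // mirrors THREAD_COUNT above
    List<Callable<Void>> tasks = new ArrayList<>(threadCount);
    for (int i = 0; i < threadCount; i++) {
      tasks.add(() -> {
        // create-then-delete filesystem work would go here
        return null;
      });
    }
    ExecutorService pool = Executors.newFixedThreadPool(threadCount);
    try {
      for (Future<Void> f : pool.invokeAll(tasks)) {
        f.get();                                  // rethrows any task failure
      }
    } finally {
      pool.shutdown();
    }
  }
}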

View File

@ -21,28 +21,45 @@ package org.apache.hadoop.fs.azure;
import static org.junit.Assume.assumeNotNull;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.junit.After;
import org.junit.Before;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
public class TestNativeAzureFileSystemContractEmulator extends
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
/**
* Run the {@code FileSystemContractBaseTest} tests against the emulator.
*/
public class ITestNativeAzureFileSystemContractEmulator extends
FileSystemContractBaseTest {
private AzureBlobStorageTestAccount testAccount;
private Path basePath;
@Rule
public TestName methodName = new TestName();
private void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
@Before
public void setUp() throws Exception {
nameThread();
testAccount = AzureBlobStorageTestAccount.createForEmulator();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(fs);
basePath = fs.makeQualified(
AzureTestUtils.createTestPath(
new Path("ITestNativeAzureFileSystemContractEmulator")));
}
@After
@Override
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
super.tearDown();
testAccount = AzureTestUtils.cleanup(testAccount);
fs = null;
}
}
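A minimal sketch, not part of the patch, of the TestName-plus-thread-rename idiom the migrated tests adopt above, so that log output from parallel surefire forks can be attributed to a test method.

import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

public class ThreadNamingSketch {
  @Rule
  public TestName methodName = new TestName();

  @Before
  public void nameThread() {
    // Tag the worker thread so parallel test output is attributable.
    Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
  }

  @Test
  public void testSomething() {
    // prints "JUnit-testSomething"
    System.out.println(Thread.currentThread().getName());
  }
}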

View File

@ -21,31 +21,59 @@ package org.apache.hadoop.fs.azure;
import static org.junit.Assume.assumeNotNull;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
public class TestNativeAzureFileSystemContractLive extends
/**
* Run the {@link FileSystemContractBaseTest} test suite against Azure storage.
*/
public class ITestNativeAzureFileSystemContractLive extends
FileSystemContractBaseTest {
private AzureBlobStorageTestAccount testAccount;
private Path basePath;
@Rule
public TestName methodName = new TestName();
private void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
@Before
public void setUp() throws Exception {
nameThread();
testAccount = AzureBlobStorageTestAccount.create();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(fs);
basePath = fs.makeQualified(
AzureTestUtils.createTestPath(
new Path("NativeAzureFileSystemContractLive")));
}
@After
@Override
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
super.tearDown();
testAccount = AzureTestUtils.cleanup(testAccount);
fs = null;
}
@Override
public Path getTestBaseDir() {
return basePath;
}
protected int getGlobalTimeout() {
return AzureTestConstants.AZURE_TEST_TIMEOUT;
}
/**

View File

@ -20,15 +20,31 @@ package org.apache.hadoop.fs.azure;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.junit.After;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import static org.junit.Assume.assumeNotNull;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
public class TestNativeAzureFileSystemContractPageBlobLive extends
/**
* Run the {@link FileSystemContractBaseTest} test suite against Azure
* storage, after switching the FS to use page blobs everywhere.
*/
public class ITestNativeAzureFileSystemContractPageBlobLive extends
FileSystemContractBaseTest {
private AzureBlobStorageTestAccount testAccount;
private Path basePath;
@Rule
public TestName methodName = new TestName();
private void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
private AzureBlobStorageTestAccount createTestAccount()
throws Exception {
@ -46,19 +62,24 @@ public class TestNativeAzureFileSystemContractPageBlobLive extends
@Before
public void setUp() throws Exception {
testAccount = createTestAccount();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(fs);
assumeNotNull(testAccount);
fs = testAccount.getFileSystem();
basePath = AzureTestUtils.pathForTests(fs, "filesystemcontractpageblob");
}
@After
@Override
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
testAccount = AzureTestUtils.cleanup(testAccount);
fs = null;
}
protected int getGlobalTimeout() {
return AzureTestConstants.AZURE_TEST_TIMEOUT;
}
@Override
public Path getTestBaseDir() {
return basePath;
}
/**

View File

@ -18,10 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.io.IOUtils;
@ -33,11 +29,10 @@ import org.junit.Test;
import com.microsoft.azure.storage.StorageException;
/*
* Tests the Native Azure file system (WASB) against an actual blob store if
* provided in the environment.
/**
* Tests the Native Azure file system (WASB) against an actual blob store.
*/
public class TestNativeAzureFileSystemLive extends
public class ITestNativeAzureFileSystemLive extends
NativeAzureFileSystemBaseTest {
@Override
@ -48,22 +43,22 @@ public class TestNativeAzureFileSystemLive extends
@Test
public void testLazyRenamePendingCanOverwriteExistingFile()
throws Exception {
final String SRC_FILE_KEY = "srcFile";
final String DST_FILE_KEY = "dstFile";
Path srcPath = new Path(SRC_FILE_KEY);
final String srcFile = "srcFile";
final String dstFile = "dstFile";
Path srcPath = path(srcFile);
FSDataOutputStream srcStream = fs.create(srcPath);
assertTrue(fs.exists(srcPath));
Path dstPath = new Path(DST_FILE_KEY);
Path dstPath = path(dstFile);
FSDataOutputStream dstStream = fs.create(dstPath);
assertTrue(fs.exists(dstPath));
NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
NativeAzureFileSystem nfs = fs;
final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath));
final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath));
nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null);
assertTrue(fs.exists(dstPath));
assertFalse(fs.exists(srcPath));
IOUtils.cleanup(null, srcStream);
IOUtils.cleanup(null, dstStream);
IOUtils.cleanupWithLogger(null, srcStream);
IOUtils.cleanupWithLogger(null, dstStream);
}
/**
* Tests fs.delete() function to delete a blob when another blob is holding a
@ -77,12 +72,11 @@ public class TestNativeAzureFileSystemLive extends
public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
throws Exception {
LOG.info("Starting test");
final String FILE_KEY = "fileWithLease";
// Create the file
Path path = new Path(FILE_KEY);
Path path = methodPath();
fs.create(path);
assertTrue(fs.exists(path));
NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
assertPathExists("test file", path);
NativeAzureFileSystem nfs = fs;
final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
final AzureNativeFileSystemStore store = nfs.getStore();
@ -142,7 +136,7 @@ public class TestNativeAzureFileSystemLive extends
store.delete(fullKey);
// At this point file SHOULD BE DELETED
assertFalse(fs.exists(path));
assertPathDoesNotExist("Leased path", path);
}
/**
@ -153,7 +147,7 @@ public class TestNativeAzureFileSystemLive extends
*/
@Test
public void testIsPageBlobKey() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
AzureNativeFileSystemStore store = fs.getStore();
// Use literal strings so it's easier to understand the tests.
// In case the constant changes, we want to know about it so we can update this test.
@ -184,7 +178,7 @@ public class TestNativeAzureFileSystemLive extends
@Test
public void testIsAtomicRenameKey() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
AzureNativeFileSystemStore store = fs.getStore();
// We want to know if the default configuration changes so we can fix
// this test.
@ -225,15 +219,15 @@ public class TestNativeAzureFileSystemLive extends
@Test
public void testMkdirOnExistingFolderWithLease() throws Exception {
SelfRenewingLease lease;
final String FILE_KEY = "folderWithLease";
// Create the folder
fs.mkdirs(new Path(FILE_KEY));
NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs;
String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY)));
Path path = methodPath();
fs.mkdirs(path);
NativeAzureFileSystem nfs = fs;
String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
AzureNativeFileSystemStore store = nfs.getStore();
// Acquire the lease on the folder
lease = store.acquireLease(fullKey);
assertTrue(lease.getLeaseID() != null);
assertNotNull("lease ID", lease.getLeaseID());
// Try to create the same folder
store.storeEmptyFolder(fullKey,
nfs.createPermissionStatus(FsPermission.getDirDefault()));

View File

@ -18,40 +18,22 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.microsoft.azure.storage.blob.BlobOutputStream;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
public class TestOutOfBandAzureBlobOperationsLive {
private FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
/**
* Live blob operations.
*/
public class ITestOutOfBandAzureBlobOperationsLive extends AbstractWasbTestBase {
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
// scenario for this particular test described at MONARCH-HADOOP-764
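A minimal sketch, not part of the patch, of the base-class pattern this file now follows: subclasses override only createTestAccount(), while setup, teardown and account cleanup stay in AbstractWasbTestBase (assuming, as the diff suggests, that a null account makes the base class skip the tests).

package org.apache.hadoop.fs.azure;

public class ITestExampleLiveOperations extends AbstractWasbTestBase {
  @Override
  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
    // Returns null when no live credentials are configured; the base class
    // is assumed to turn that into a skipped (assumed-away) test run.
    return AzureBlobStorageTestAccount.create();
  }
}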

View File

@ -18,37 +18,33 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.integration.AbstractAzureScaleTest;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
/**
* Write data into a page blob and verify you can read back all of it
* or just a part of it.
*/
public class TestReadAndSeekPageBlobAfterWrite {
private static final Log LOG = LogFactory.getLog(TestReadAndSeekPageBlobAfterWrite.class);
public class ITestReadAndSeekPageBlobAfterWrite extends AbstractAzureScaleTest {
private static final Logger LOG =
LoggerFactory.getLogger(ITestReadAndSeekPageBlobAfterWrite.class);
private FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
private byte[] randomData;
// Page blob physical page size
@ -63,35 +59,28 @@ public class TestReadAndSeekPageBlobAfterWrite {
// A key with a prefix under /pageBlobs, which for the test file system will
// force use of a page blob.
private static final String KEY = "/pageBlobs/file.dat";
private static final Path PATH = new Path(KEY); // path of page blob file to read and write
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
// path of page blob file to read and write
private Path blobPath;
@Before
@Override
public void setUp() throws Exception {
testAccount = createTestAccount();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
super.setUp();
fs = getTestAccount().getFileSystem();
// Make sure we are using an integral number of pages.
assertEquals(0, MAX_BYTES % PAGE_SIZE);
// load an in-memory array of random data
randomData = new byte[PAGE_SIZE * MAX_PAGES];
rand.nextBytes(randomData);
blobPath = blobPath("ITestReadAndSeekPageBlobAfterWrite");
}
@After
@Override
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
deleteQuietly(fs, blobPath, true);
super.tearDown();
}
/**
@ -101,9 +90,9 @@ public class TestReadAndSeekPageBlobAfterWrite {
@Test
public void testIsPageBlobFileName() {
AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore();
String[] a = KEY.split("/");
String[] a = blobPath.toUri().getPath().split("/");
String key2 = a[1] + "/";
assertTrue(store.isPageBlobKey(key2));
assertTrue("Not a page blob: " + blobPath, store.isPageBlobKey(key2));
}
/**
@ -114,7 +103,7 @@ public class TestReadAndSeekPageBlobAfterWrite {
public void testReadAfterWriteRandomData() throws IOException {
// local shorthand
final int PDS = PAGE_DATA_SIZE;
final int pds = PAGE_DATA_SIZE;
// Test for sizes at and near page boundaries
int[] dataSizes = {
@ -124,13 +113,13 @@ public class TestReadAndSeekPageBlobAfterWrite {
// Near first physical page boundary (because the implementation
// stores PDS + the page header size bytes on each page).
PDS - 1, PDS, PDS + 1, PDS + 2, PDS + 3,
pds - 1, pds, pds + 1, pds + 2, pds + 3,
// near second physical page boundary
(2 * PDS) - 1, (2 * PDS), (2 * PDS) + 1, (2 * PDS) + 2, (2 * PDS) + 3,
(2 * pds) - 1, (2 * pds), (2 * pds) + 1, (2 * pds) + 2, (2 * pds) + 3,
// near tenth physical page boundary
(10 * PDS) - 1, (10 * PDS), (10 * PDS) + 1, (10 * PDS) + 2, (10 * PDS) + 3,
(10 * pds) - 1, (10 * pds), (10 * pds) + 1, (10 * pds) + 2, (10 * pds) + 3,
// test one big size, >> 4MB (an internal buffer size in the code)
MAX_BYTES
@ -152,7 +141,7 @@ public class TestReadAndSeekPageBlobAfterWrite {
*/
private void readRandomDataAndVerify(int size) throws AzureException, IOException {
byte[] b = new byte[size];
FSDataInputStream stream = fs.open(PATH);
FSDataInputStream stream = fs.open(blobPath);
int bytesRead = stream.read(b);
stream.close();
assertEquals(bytesRead, size);
@ -176,7 +165,7 @@ public class TestReadAndSeekPageBlobAfterWrite {
// Write a specified amount of random data to the file path for this test class.
private void writeRandomData(int size) throws IOException {
OutputStream output = fs.create(PATH);
OutputStream output = fs.create(blobPath);
output.write(randomData, 0, size);
output.close();
}
@ -190,43 +179,45 @@ public class TestReadAndSeekPageBlobAfterWrite {
writeRandomData(PAGE_SIZE * MAX_PAGES);
int recordSize = 100;
byte[] b = new byte[recordSize];
FSDataInputStream stream = fs.open(PATH);
// Seek to a boundary around the middle of the 6th page
int seekPosition = 5 * PAGE_SIZE + 250;
stream.seek(seekPosition);
// Read a record's worth of bytes and verify results
int bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
try(FSDataInputStream stream = fs.open(blobPath)) {
// Seek to a boundary around the middle of the 6th page
int seekPosition = 5 * PAGE_SIZE + 250;
stream.seek(seekPosition);
// Seek to another spot and read a record greater than a page
seekPosition = 10 * PAGE_SIZE + 250;
stream.seek(seekPosition);
recordSize = 1000;
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read a record's worth of bytes and verify results
int bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read the last 100 bytes of the file
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Seek to another spot and read a record greater than a page
seekPosition = 10 * PAGE_SIZE + 250;
stream.seek(seekPosition);
recordSize = 1000;
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// Read past the end of the file and we should get only partial data.
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
assertEquals(50, bytesRead);
// Read the last 100 bytes of the file
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
verifyReadRandomData(b, bytesRead, seekPosition, recordSize);
// compare last 50 bytes written with those read
byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
assertTrue(comparePrefix(tail, b, 50));
// Read past the end of the file and we should get only partial data.
recordSize = 100;
seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50;
stream.seek(seekPosition);
b = new byte[recordSize];
bytesRead = stream.read(b);
assertEquals(50, bytesRead);
// compare last 50 bytes written with those read
byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length);
assertTrue(comparePrefix(tail, b, 50));
}
}
// Verify that reading a record of data after seeking gives the expected data.
@ -253,16 +244,14 @@ public class TestReadAndSeekPageBlobAfterWrite {
* The syncInterval is the number of writes after which to call hflush to
* force the data to storage.
*/
private void writeAndReadOneFile(int numWrites, int recordLength, int syncInterval) throws IOException {
final int NUM_WRITES = numWrites;
final int RECORD_LENGTH = recordLength;
final int SYNC_INTERVAL = syncInterval;
private void writeAndReadOneFile(int numWrites,
int recordLength, int syncInterval) throws IOException {
// A lower bound on the minimum time we think it will take to do
// a write to Azure storage.
final long MINIMUM_EXPECTED_TIME = 20;
LOG.info("Writing " + NUM_WRITES * RECORD_LENGTH + " bytes to " + PATH.getName());
FSDataOutputStream output = fs.create(PATH);
LOG.info("Writing " + numWrites * recordLength + " bytes to " + blobPath.getName());
FSDataOutputStream output = fs.create(blobPath);
int writesSinceHFlush = 0;
try {
@ -270,11 +259,11 @@ public class TestReadAndSeekPageBlobAfterWrite {
// to test concurrent execution gates.
output.flush();
output.hflush();
for (int i = 0; i < NUM_WRITES; i++) {
output.write(randomData, i * RECORD_LENGTH, RECORD_LENGTH);
for (int i = 0; i < numWrites; i++) {
output.write(randomData, i * recordLength, recordLength);
writesSinceHFlush++;
output.flush();
if ((i % SYNC_INTERVAL) == 0) {
if ((i % syncInterval) == 0) {
output.hflush();
writesSinceHFlush = 0;
}
@ -293,8 +282,8 @@ public class TestReadAndSeekPageBlobAfterWrite {
}
// Read the data back and check it.
FSDataInputStream stream = fs.open(PATH);
int SIZE = NUM_WRITES * RECORD_LENGTH;
FSDataInputStream stream = fs.open(blobPath);
int SIZE = numWrites * recordLength;
byte[] b = new byte[SIZE];
try {
stream.seek(0);
@ -305,7 +294,7 @@ public class TestReadAndSeekPageBlobAfterWrite {
}
// delete the file
fs.delete(PATH, false);
fs.delete(blobPath, false);
}
// Test writing to a large file repeatedly as a stress test.
@ -324,32 +313,29 @@ public class TestReadAndSeekPageBlobAfterWrite {
// Write to a file repeatedly to verify that it extends.
// The page blob file should start out at 128MB and finish at 256MB.
@Test(timeout=300000)
public void testFileSizeExtension() throws IOException {
final int writeSize = 1024 * 1024;
final int numWrites = 129;
final byte dataByte = 5;
byte[] data = new byte[writeSize];
Arrays.fill(data, dataByte);
FSDataOutputStream output = fs.create(PATH);
try {
try (FSDataOutputStream output = fs.create(blobPath)) {
for (int i = 0; i < numWrites; i++) {
output.write(data);
output.hflush();
LOG.debug("total writes = " + (i + 1));
}
} finally {
output.close();
}
// Show that we wrote more than the default page blob file size.
assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE);
// Verify we can list the new size. That will prove we expanded the file.
FileStatus[] status = fs.listStatus(PATH);
assertTrue(status[0].getLen() == numWrites * writeSize);
LOG.debug("Total bytes written to " + PATH + " = " + status[0].getLen());
fs.delete(PATH, false);
FileStatus[] status = fs.listStatus(blobPath);
assertEquals("File size hasn't changed " + status,
numWrites * writeSize, status[0].getLen());
LOG.debug("Total bytes written to " + blobPath + " = " + status[0].getLen());
fs.delete(blobPath, false);
}
}
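A minimal sketch, not part of the patch, of the write/hflush cadence that writeAndReadOneFile exercises above: records are written in a loop and hflush is issued every syncInterval writes to force buffered data to the store. The local filesystem and path are stand-ins for the page blob setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SyncIntervalSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path("target/syncIntervalSketch.dat");  // placeholder path
    byte[] record = new byte[100];
    int numWrites = 1000;
    int syncInterval = 10;
    try (FSDataOutputStream out = fs.create(p, true)) {
      for (int i = 0; i < numWrites; i++) {
        out.write(record);
        if (i % syncInterval == 0) {
          out.hflush();   // force buffered data out to the store
        }
      }
    }
    fs.delete(p, false);
  }
}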

View File

@ -35,7 +35,6 @@ import org.apache.http.client.methods.HttpGet;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
@ -53,9 +52,9 @@ import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.times;
/**
* Test class to hold all WasbRemoteCallHelper tests
* Test class to hold all WasbRemoteCallHelper tests.
*/
public class TestWasbRemoteCallHelper
public class ITestWasbRemoteCallHelper
extends AbstractWasbTestBase {
public static final String EMPTY_STRING = "";
private static final int INVALID_HTTP_STATUS_CODE_999 = 999;
@ -68,23 +67,21 @@ public class TestWasbRemoteCallHelper
return AzureBlobStorageTestAccount.create(conf);
}
@Before
public void beforeMethod() {
@Override
public void setUp() throws Exception {
super.setUp();
boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
boolean useAuthorization = fs.getConf()
.getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
useSecureMode && useAuthorization);
Assume.assumeTrue(
useSecureMode && useAuthorization
);
}
@Rule
public ExpectedException expectedEx = ExpectedException.none();
/**
* Test invalid status-code
* Test invalid status-code.
* @throws Throwable
*/
@Test // (expected = WasbAuthorizationException.class)
@ -95,15 +92,17 @@ public class TestWasbRemoteCallHelper
// set up mocks
HttpClient mockHttpClient = Mockito.mock(HttpClient.class);
HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class);
Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse);
Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any()))
.thenReturn(mockHttpResponse);
Mockito.when(mockHttpResponse.getStatusLine())
.thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999));
// finished setting up mocks
performop(mockHttpClient);
}
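A minimal sketch, not part of the patch, of the mock wiring the test above performs: an HttpClient whose execute() hands back a response with a chosen status code. It assumes mockito and httpclient/httpcore on the classpath; the helper name is illustrative.

import org.apache.http.HttpResponse;
import org.apache.http.HttpVersion;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.message.BasicStatusLine;
import org.mockito.Mockito;

public class RemoteCallMockSketch {
  static HttpClient mockClientReturning(int statusCode) throws Exception {
    HttpClient client = Mockito.mock(HttpClient.class);
    HttpResponse response = Mockito.mock(HttpResponse.class);
    // Any GET issued through this client yields the canned response.
    Mockito.when(client.execute(Mockito.<HttpGet>any()))
        .thenReturn(response);
    Mockito.when(response.getStatusLine())
        .thenReturn(new BasicStatusLine(HttpVersion.HTTP_1_1, statusCode, ""));
    return client;
  }
}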
/**
* Test invalid Content-Type
* Test invalid Content-Type.
* @throws Throwable
*/
@Test // (expected = WasbAuthorizationException.class)
@ -124,7 +123,7 @@ public class TestWasbRemoteCallHelper
}
/**
* Test missing Content-Length
* Test missing Content-Length.
* @throws Throwable
*/
@Test // (expected = WasbAuthorizationException.class)
@ -145,7 +144,7 @@ public class TestWasbRemoteCallHelper
}
/**
* Test Content-Length exceeds max
* Test Content-Length exceeds max.
* @throws Throwable
*/
@Test // (expected = WasbAuthorizationException.class)
@ -191,7 +190,7 @@ public class TestWasbRemoteCallHelper
}
/**
* Test valid JSON response
* Test valid JSON response.
* @throws Throwable
*/
@Test
@ -220,7 +219,7 @@ public class TestWasbRemoteCallHelper
}
/**
* Test malformed JSON response
* Test malformed JSON response.
* @throws Throwable
*/
@Test // (expected = WasbAuthorizationException.class)
@ -250,7 +249,7 @@ public class TestWasbRemoteCallHelper
}
/**
* Test valid JSON response failure response code
* Test valid JSON response failure response code.
* @throws Throwable
*/
@Test // (expected = WasbAuthorizationException.class)

View File

@ -19,11 +19,6 @@
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
import java.io.ByteArrayInputStream;
@ -36,6 +31,7 @@ import java.util.Date;
import java.util.EnumSet;
import java.io.File;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
@ -57,7 +53,7 @@ import org.junit.rules.TemporaryFolder;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import com.microsoft.azure.storage.blob.CloudBlockBlob;
public class TestWasbUriAndConfiguration {
public class ITestWasbUriAndConfiguration extends AbstractWasbTestWithTimeout {
private static final int FILE_SIZE = 4096;
private static final String PATH_DELIMITER = "/";
@ -73,10 +69,7 @@ public class TestWasbUriAndConfiguration {
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
testAccount = AzureTestUtils.cleanupTestAccount(testAccount);
}
@Before

View File

@ -38,11 +38,12 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
private boolean performOwnerMatch;
private CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache;
// The full qualified URL to the root directory
// The full qualified URL to the root directory
private String qualifiedPrefixUrl;
public MockWasbAuthorizerImpl(NativeAzureFileSystem fs) {
qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory())
qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(),
fs.getWorkingDirectory())
.toString().replaceAll("/$", "");
cache = new CachingAuthorizer<>(TimeUnit.MINUTES.convert(5L, TimeUnit.MINUTES), "AUTHORIZATION");
}
@ -64,19 +65,23 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
public void addAuthRule(String wasbAbsolutePath,
String accessType, boolean access) {
wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"), accessType)
wasbAbsolutePath = qualifiedPrefixUrl + wasbAbsolutePath;
AuthorizationComponent component = wasbAbsolutePath.endsWith("*")
? new AuthorizationComponent("^" + wasbAbsolutePath.replace("*", ".*"),
accessType)
: new AuthorizationComponent(wasbAbsolutePath, accessType);
this.authRules.put(component, access);
}
@Override
public boolean authorize(String wasbAbsolutePath, String accessType, String owner)
public boolean authorize(String wasbAbsolutePath,
String accessType,
String owner)
throws WasbAuthorizationException {
if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
if (wasbAbsolutePath.endsWith(
NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
return true;
}
@ -108,20 +113,23 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
// In case of root("/"), owner match does not happen because owner is returned as empty string.
// we force the owner match purely for tests, to make sure all operations work seamlessly with an owner.
if (this.performOwnerMatch
&& StringUtils.equalsIgnoreCase(wasbAbsolutePath, qualifiedPrefixUrl + "/")) {
&& StringUtils.equalsIgnoreCase(wasbAbsolutePath,
qualifiedPrefixUrl + "/")) {
owner = currentUserShortName;
}
boolean shouldEvaluateOwnerAccess = owner != null && !owner.isEmpty()
&& this.performOwnerMatch;
&& this.performOwnerMatch;
boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName, owner);
boolean isOwnerMatch = StringUtils.equalsIgnoreCase(currentUserShortName,
owner);
AuthorizationComponent component =
new AuthorizationComponent(wasbAbsolutePath, accessType);
if (authRules.containsKey(component)) {
return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(component) : authRules.get(component);
return shouldEvaluateOwnerAccess ? isOwnerMatch && authRules.get(
component) : authRules.get(component);
} else {
// Regex-pattern match if we don't have a straight match
for (Map.Entry<AuthorizationComponent, Boolean> entry : authRules.entrySet()) {
@ -129,8 +137,11 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
String keyPath = key.getWasbAbsolutePath();
String keyAccess = key.getAccessType();
if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath) && keyAccess.equals(accessType)) {
return shouldEvaluateOwnerAccess ? isOwnerMatch && entry.getValue() : entry.getValue();
if (keyPath.endsWith("*") && Pattern.matches(keyPath, wasbAbsolutePath)
&& keyAccess.equals(accessType)) {
return shouldEvaluateOwnerAccess
? isOwnerMatch && entry.getValue()
: entry.getValue();
}
}
return false;
@ -141,47 +152,47 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
authRules.clear();
cache.clear();
}
private static class AuthorizationComponent {
private final String wasbAbsolutePath;
private final String accessType;
AuthorizationComponent(String wasbAbsolutePath,
String accessType) {
this.wasbAbsolutePath = wasbAbsolutePath;
this.accessType = accessType;
}
@Override
public int hashCode() {
return this.wasbAbsolutePath.hashCode() ^ this.accessType.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null
|| !(obj instanceof AuthorizationComponent)) {
return false;
}
return ((AuthorizationComponent) obj).
getWasbAbsolutePath().equals(this.wasbAbsolutePath)
&& ((AuthorizationComponent) obj).
getAccessType().equals(this.accessType);
}
public String getWasbAbsolutePath() {
return this.wasbAbsolutePath;
}
public String getAccessType() {
return accessType;
}
}
}
class AuthorizationComponent {
private String wasbAbsolutePath;
private String accessType;
public AuthorizationComponent(String wasbAbsolutePath,
String accessType) {
this.wasbAbsolutePath = wasbAbsolutePath;
this.accessType = accessType;
}
@Override
public int hashCode() {
return this.wasbAbsolutePath.hashCode() ^ this.accessType.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj == null
|| !(obj instanceof AuthorizationComponent)) {
return false;
}
return ((AuthorizationComponent)obj).
getWasbAbsolutePath().equals(this.wasbAbsolutePath)
&& ((AuthorizationComponent)obj).
getAccessType().equals(this.accessType);
}
public String getWasbAbsolutePath() {
return this.wasbAbsolutePath;
}
public String getAccessType() {
return accessType;
}
}
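A minimal sketch, not part of the patch, of the wildcard-rule matching that MockWasbAuthorizerImpl.authorize falls back to above: a trailing "*" in a rule path is rewritten to an anchored regex and candidate paths are checked with Pattern.matches. The rule contents here are made up.

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class WildcardRuleSketch {
  public static void main(String[] args) {
    Map<String, Boolean> rules = new HashMap<>();
    // A trailing "*" becomes an anchored regex, as in addAuthRule above.
    String rulePath = "/data/*";
    rules.put("^" + rulePath.replace("*", ".*"), true);

    String requested = "/data/part-0000";
    boolean allowed = false;
    for (Map.Entry<String, Boolean> e : rules.entrySet()) {
      // Pattern.matches requires a full match of the candidate path.
      if (Pattern.matches(e.getKey(), requested)) {
        allowed = e.getValue();
      }
    }
    System.out.println(allowed); // true
  }
}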

View File

@ -18,12 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
@ -47,16 +41,18 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
import static org.apache.hadoop.test.GenericTestUtils.*;
/*
* Tests the Native Azure file system (WASB) against an actual blob store if
* provided in the environment.
@ -71,15 +67,46 @@ public abstract class NativeAzureFileSystemBaseTest
private final long modifiedTimeErrorMargin = 5 * 1000; // Give it +/-5 seconds
public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class);
protected NativeAzureFileSystem fs;
@Override
public void setUp() throws Exception {
super.setUp();
fs = getFileSystem();
}
/**
* Assert that a path does not exist.
*
* @param message message to include in the assertion failure message
* @param path path in the filesystem
* @throws IOException IO problems
*/
public void assertPathDoesNotExist(String message,
Path path) throws IOException {
ContractTestUtils.assertPathDoesNotExist(fs, message, path);
}
/**
* Assert that a path exists.
*
* @param message message to include in the assertion failure message
* @param path path in the filesystem
* @throws IOException IO problems
*/
public void assertPathExists(String message,
Path path) throws IOException {
ContractTestUtils.assertPathExists(fs, message, path);
}
@Test
public void testCheckingNonExistentOneLetterFile() throws Exception {
assertFalse(fs.exists(new Path("/a")));
assertPathDoesNotExist("one letter file", new Path("/a"));
}
@Test
public void testStoreRetrieveFile() throws Exception {
Path testFile = new Path("unit-test-file");
Path testFile = methodPath();
writeString(testFile, "Testing");
assertTrue(fs.exists(testFile));
FileStatus status = fs.getFileStatus(testFile);
@ -93,7 +120,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testStoreDeleteFolder() throws Exception {
Path testFolder = new Path("storeDeleteFolder");
Path testFolder = methodPath();
assertFalse(fs.exists(testFolder));
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.exists(testFolder));
@ -105,22 +132,22 @@ public abstract class NativeAzureFileSystemBaseTest
assertEquals(new FsPermission((short) 0755), status.getPermission());
Path innerFile = new Path(testFolder, "innerFile");
assertTrue(fs.createNewFile(innerFile));
assertTrue(fs.exists(innerFile));
assertPathExists("inner file", innerFile);
assertTrue(fs.delete(testFolder, true));
assertFalse(fs.exists(innerFile));
assertFalse(fs.exists(testFolder));
assertPathDoesNotExist("inner file", innerFile);
assertPathDoesNotExist("testFolder", testFolder);
}
@Test
public void testFileOwnership() throws Exception {
Path testFile = new Path("ownershipTestFile");
Path testFile = methodPath();
writeString(testFile, "Testing");
testOwnership(testFile);
}
@Test
public void testFolderOwnership() throws Exception {
Path testFolder = new Path("ownershipTestFolder");
Path testFolder = methodPath();
fs.mkdirs(testFolder);
testOwnership(testFolder);
}
@ -147,7 +174,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testFilePermissions() throws Exception {
Path testFile = new Path("permissionTestFile");
Path testFile = methodPath();
FsPermission permission = FsPermission.createImmutable((short) 644);
createEmptyFile(testFile, permission);
FileStatus ret = fs.getFileStatus(testFile);
@ -157,7 +184,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testFolderPermissions() throws Exception {
Path testFolder = new Path("permissionTestFolder");
Path testFolder = methodPath();
FsPermission permission = FsPermission.createImmutable((short) 644);
fs.mkdirs(testFolder, permission);
FileStatus ret = fs.getFileStatus(testFolder);
@ -176,9 +203,9 @@ public abstract class NativeAzureFileSystemBaseTest
createEmptyFile(testFile, permission);
FsPermission rootPerm = fs.getFileStatus(firstDir.getParent()).getPermission();
FsPermission inheritPerm = FsPermission.createImmutable((short)(rootPerm.toShort() | 0300));
assertTrue(fs.exists(testFile));
assertTrue(fs.exists(firstDir));
assertTrue(fs.exists(middleDir));
assertPathExists("test file", testFile);
assertPathExists("firstDir", firstDir);
assertPathExists("middleDir", middleDir);
// verify that the indirectly created directory inherited its permissions from the root directory
FileStatus directoryStatus = fs.getFileStatus(middleDir);
assertTrue(directoryStatus.isDirectory());
@ -188,7 +215,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertFalse(fileStatus.isDirectory());
assertEqualsIgnoreStickyBit(umaskedPermission, fileStatus.getPermission());
assertTrue(fs.delete(firstDir, true));
assertFalse(fs.exists(testFile));
assertPathDoesNotExist("deleted file", testFile);
// An alternative test scenario would've been to delete the file first,
// and then check for the existence of the upper folders still. But that
@ -264,7 +291,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertTrue(fs.delete(new Path("deep"), true));
}
private static enum RenameFolderVariation {
private enum RenameFolderVariation {
CreateFolderAndInnerFile, CreateJustInnerFile, CreateJustFolder
}
@ -303,10 +330,10 @@ public abstract class NativeAzureFileSystemBaseTest
localFs.delete(localFilePath, true);
try {
writeString(localFs, localFilePath, "Testing");
Path dstPath = new Path("copiedFromLocal");
Path dstPath = methodPath();
assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false,
fs.getConf()));
assertTrue(fs.exists(dstPath));
assertPathExists("coied from local", dstPath);
assertEquals("Testing", readString(fs, dstPath));
fs.delete(dstPath, true);
} finally {
@ -423,32 +450,32 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testReadingDirectoryAsFile() throws Exception {
Path dir = new Path("/x");
Path dir = methodPath();
assertTrue(fs.mkdirs(dir));
try {
fs.open(dir).close();
assertTrue("Should've thrown", false);
} catch (FileNotFoundException ex) {
assertEquals("/x is a directory not a file.", ex.getMessage());
assertExceptionContains("a directory not a file.", ex);
}
}
@Test
public void testCreatingFileOverDirectory() throws Exception {
Path dir = new Path("/x");
Path dir = methodPath();
assertTrue(fs.mkdirs(dir));
try {
fs.create(dir).close();
assertTrue("Should've thrown", false);
} catch (IOException ex) {
assertEquals("Cannot create file /x; already exists as a directory.",
ex.getMessage());
assertExceptionContains("Cannot create file", ex);
assertExceptionContains("already exists as a directory", ex);
}
}
@Test
public void testInputStreamReadWithZeroSizeBuffer() throws Exception {
Path newFile = new Path("zeroSizeRead");
Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@ -460,7 +487,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEof() throws Exception {
Path newFile = new Path("eofRead");
Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@ -482,7 +509,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadWithBufferReturnsMinusOneOnEofForLargeBuffer() throws Exception {
Path newFile = new Path("eofRead2");
Path newFile = methodPath();
OutputStream output = fs.create(newFile);
byte[] outputBuff = new byte[97331];
for(int i = 0; i < outputBuff.length; ++i) {
@ -508,7 +535,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testInputStreamReadIntReturnsMinusOneOnEof() throws Exception {
Path newFile = new Path("eofRead3");
Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(10);
output.close();
@ -525,7 +552,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetPermissionOnFile() throws Exception {
Path newFile = new Path("testPermission");
Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
@ -540,14 +567,14 @@ public abstract class NativeAzureFileSystemBaseTest
// Don't check the file length for page blobs. Only block blobs
// provide the actual length of bytes written.
if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
if (!(this instanceof ITestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
}
@Test
public void testSetPermissionOnFolder() throws Exception {
Path newFolder = new Path("testPermission");
Path newFolder = methodPath();
assertTrue(fs.mkdirs(newFolder));
FsPermission newPermission = new FsPermission((short) 0600);
fs.setPermission(newFolder, newPermission);
@ -559,7 +586,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetOwnerOnFile() throws Exception {
Path newFile = new Path("testOwner");
Path newFile = methodPath();
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
@ -571,7 +598,7 @@ public abstract class NativeAzureFileSystemBaseTest
// File length is only reported to be the size of bytes written to the file for block blobs.
// So only check it for block blobs, not page blobs.
if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
if (!(this instanceof ITestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
fs.setOwner(newFile, null, "newGroup");
@ -583,7 +610,7 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testSetOwnerOnFolder() throws Exception {
Path newFolder = new Path("testOwner");
Path newFolder = methodPath();
assertTrue(fs.mkdirs(newFolder));
fs.setOwner(newFolder, "newUser", null);
FileStatus newStatus = fs.getFileStatus(newFolder);
@ -594,21 +621,21 @@ public abstract class NativeAzureFileSystemBaseTest
@Test
public void testModifiedTimeForFile() throws Exception {
Path testFile = new Path("testFile");
Path testFile = methodPath();
fs.create(testFile).close();
testModifiedTime(testFile);
}
@Test
public void testModifiedTimeForFolder() throws Exception {
Path testFolder = new Path("testFolder");
Path testFolder = methodPath();
assertTrue(fs.mkdirs(testFolder));
testModifiedTime(testFolder);
}
@Test
public void testFolderLastModifiedTime() throws Exception {
Path parentFolder = new Path("testFolder");
Path parentFolder = methodPath();
Path innerFile = new Path(parentFolder, "innerfile");
assertTrue(fs.mkdirs(parentFolder));
@ -983,7 +1010,7 @@ public abstract class NativeAzureFileSystemBaseTest
// Make sure rename pending file is gone.
FileStatus[] listed = fs.listStatus(new Path("/"));
assertEquals(1, listed.length);
assertEquals("Pending directory still found", 1, listed.length);
assertTrue(listed[0].isDirectory());
}
@ -1681,7 +1708,7 @@ public abstract class NativeAzureFileSystemBaseTest
assertTrue("Unanticipated exception", false);
}
} else {
assertTrue("Unknown thread name", false);
fail("Unknown thread name");
}
LOG.info(name + " is exiting.");


View File

========================================================================
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=========================================================================
In order to run Windows Azure Storage Blob (WASB) unit tests against a live
Azure Storage account, you need to provide test account details in a configuration
file called azure-test.xml. See hadoop-tools/hadoop-azure/README.txt for details
on configuration, and how to run the tests.

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeNotNull;
/**
* Extends TestAzureConcurrentOutOfBandIo in order to run testReadOOBWrites with secure mode
* (fs.azure.secure.mode) both enabled and disabled.
*/
public class TestAzureConcurrentOutOfBandIoWithSecureMode extends TestAzureConcurrentOutOfBandIo {
// Overridden TestCase methods.
@Before
@Override
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createOutOfBandStore(
UPLOAD_BLOCK_SIZE, DOWNLOAD_BLOCK_SIZE, true);
assumeNotNull(testAccount);
}
}
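A minimal sketch, not part of the patch, of the Assume-based skip that setUp above relies on: assumeNotNull turns a missing live resource into a skipped test rather than a failure. The environment-variable lookup is a placeholder.

import static org.junit.Assume.assumeNotNull;

import org.junit.Before;
import org.junit.Test;

public class AssumeSkipSketch {
  private Object liveResource;

  @Before
  public void setUp() {
    liveResource = System.getenv("AZURE_TEST_ACCOUNT"); // placeholder lookup
    // With no live account configured this skips every test in the class
    // instead of failing it, which is how the live WASB suites behave.
    assumeNotNull(liveResource);
  }

  @Test
  public void testNeedsLiveResource() {
    // runs only when the resource above was found
  }
}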

View File

@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
@ -42,7 +37,7 @@ import org.junit.Test;
/**
* Tests that we put the correct metadata on blobs created through WASB.
*/
public class TestBlobMetadata {
public class TestBlobMetadata extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;

View File

@ -33,9 +33,6 @@ import org.junit.Test;
import java.net.HttpURLConnection;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertEquals;
/**
* Tests for <code>BlobOperationDescriptor</code>.
*/

View File

@ -21,13 +21,10 @@ package org.apache.hadoop.fs.azure;
import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
/**
* Tests for <code>ClientThrottlingAnalyzer</code>.
*/
public class TestClientThrottlingAnalyzer {
public class TestClientThrottlingAnalyzer extends AbstractWasbTestWithTimeout {
private static final int ANALYSIS_PERIOD = 1000;
private static final int ANALYSIS_PERIOD_PLUS_10_PERCENT = ANALYSIS_PERIOD
+ ANALYSIS_PERIOD / 10;

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@ -49,8 +48,8 @@ public class TestNativeAzureFileSystemAuthorization
protected MockWasbAuthorizerImpl authorizer;
@Override
public Configuration getConfiguration() {
Configuration conf = super.getConfiguration();
public Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "user1 , user2");
@ -59,13 +58,12 @@ public class TestNativeAzureFileSystemAuthorization
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
Configuration conf = getConfiguration();
return AzureBlobStorageTestAccount.create(conf);
return AzureBlobStorageTestAccount.create(createConfiguration());
}
@Before
public void beforeMethod() {
@Override
public void setUp() throws Exception {
super.setUp();
boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false);
boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false);
Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping",
@ -76,7 +74,6 @@ public class TestNativeAzureFileSystemAuthorization
fs.updateWasbAuthorizer(authorizer);
}
@Rule
public ExpectedException expectedEx = ExpectedException.none();
@ -95,7 +92,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Setup the expected exception class, and exception message that the test is supposed to fail with
* Setup the expected exception class, and exception message that the test is supposed to fail with.
*/
protected void setExpectedFailureMessage(String operation, Path path) {
expectedEx.expect(WasbAuthorizationException.class);
@ -104,7 +101,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test to verify Create access check
* Positive test to verify Create access check.
* The file is created directly under an existing folder.
* No intermediate folders need to be created.
* @throws Throwable
@ -128,7 +125,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test to verify Create access check
* Positive test to verify Create access check.
* The test tries to create a file whose parent is non-existent to ensure that
* the intermediate folders between ancestor and direct parent are being created
* when proper ranger policies are configured.
@ -155,7 +152,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Negative test to verify that create fails when trying to overwrite an existing file
* Negative test to verify that create fails when trying to overwrite an existing file.
* without proper write permissions on the file being overwritten.
* @throws Throwable
*/
@ -181,7 +178,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test to verify that create succeeds when trying to overwrite an existing file
* Positive test to verify that create succeeds when trying to overwrite an existing file.
* when proper write permissions on the file being overwritten are provided.
* @throws Throwable
*/
@ -232,7 +229,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test to verify listStatus access check
* Positive test to verify listStatus access check.
* @throws Throwable
*/
@Test
@ -257,7 +254,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Negative test to verify listStatus access check
* Negative test to verify listStatus access check.
* @throws Throwable
*/
@ -342,7 +339,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Negative test to verify rename access check - the dstFolder disallows rename
* Negative test to verify rename access check - the dstFolder disallows rename.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@ -373,7 +370,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test to verify rename access check - the dstFolder allows rename
* Positive test to verify rename access check - the dstFolder allows rename.
* @throws Throwable
*/
@Test
@ -484,7 +481,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test to verify file delete access check
* Positive test to verify file delete access check.
* @throws Throwable
*/
@Test
@ -506,7 +503,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Negative test to verify file delete access check
* Negative test to verify file delete access check.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@ -544,7 +541,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Positive test to verify file delete access check, with intermediate folders
* Uses wildcard recursive permissions
* Uses wildcard recursive permissions.
* @throws Throwable
*/
@Test
@ -582,7 +579,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test for mkdirs access check
* Positive test for mkdirs access check.
* @throws Throwable
*/
@Test
@ -668,7 +665,7 @@ public class TestNativeAzureFileSystemAuthorization
}
}
/**
* Negative test for mkdirs access check
* Negative test for mkdirs access check.
* @throws Throwable
*/
@Test //(expected=WasbAuthorizationException.class)
@ -692,7 +689,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Positive test triple slash format (wasb:///) access check
* Positive test triple slash format (wasb:///) access check.
* @throws Throwable
*/
@Test
@ -708,7 +705,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
* Negative test for setOwner when Authorization is enabled
* Negative test for setOwner when Authorization is enabled.
*/
@Test
public void testSetOwnerThrowsForUnauthorisedUsers() throws Throwable {
@ -744,7 +741,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Test for setOwner when Authorization is enabled and
* the user is specified in chown allowed user list
* the user is specified in chown allowed user list.
* */
@Test
public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable {
@ -785,7 +782,7 @@ public class TestNativeAzureFileSystemAuthorization
/**
* Test for setOwner when Authorization is enabled and
* the userlist is specified as '*'
* the userlist is specified as '*'.
* */
@Test
public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throwable {
@ -829,7 +826,7 @@ public class TestNativeAzureFileSystemAuthorization
}
/** Test for setOwner throws for illegal setup of chown
* allowed testSetOwnerSucceedsForAuthorisedUsers
* allowed testSetOwnerSucceedsForAuthorisedUsers.
*/
@Test
public void testSetOwnerFailsForIllegalSetup() throws Throwable {

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
@ -29,7 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class TestNativeAzureFileSystemBlockLocations {
/**
* Test block location logic.
*/
public class TestNativeAzureFileSystemBlockLocations
extends AbstractWasbTestWithTimeout {
@Test
public void testNumberOfBlocks() throws Exception {
Configuration conf = new Configuration();

View File

@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
@ -33,32 +28,30 @@ import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestNativeAzureFileSystemConcurrency {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
public class TestNativeAzureFileSystemConcurrency extends AbstractWasbTestBase {
private InMemoryBlockBlobStore backingStore;
@Before
@Override
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
backingStore = testAccount.getMockStorage().getBackingStore();
super.setUp();
backingStore = getTestAccount().getMockStorage().getBackingStore();
}
@After
@Override
public void tearDown() throws Exception {
testAccount.cleanup();
fs = null;
super.tearDown();
backingStore = null;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.createMock();
}
@Test
public void testLinkBlobs() throws Exception {
Path filePath = new Path("/inProgress");

View File

@ -23,6 +23,9 @@ import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
/**
* Mocked testing of FileSystemContractBaseTest.
*/
public class TestNativeAzureFileSystemContractMocked extends
FileSystemContractBaseTest {

View File

@ -18,17 +18,11 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
@ -38,24 +32,18 @@ import org.junit.Test;
* creation/rename of files/directories through WASB that have colons in the
* names.
*/
public class TestNativeAzureFileSystemFileNameCheck {
private FileSystem fs = null;
private AzureBlobStorageTestAccount testAccount = null;
public class TestNativeAzureFileSystemFileNameCheck extends AbstractWasbTestBase {
private String root = null;
@Before
@Override
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
fs = testAccount.getFileSystem();
super.setUp();
root = fs.getUri().toString();
}
@After
public void tearDown() throws Exception {
testAccount.cleanup();
root = null;
fs = null;
testAccount = null;
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.createMock();
}
@Test
@ -138,4 +126,4 @@ public class TestNativeAzureFileSystemFileNameCheck {
fsck.run(new String[] { p.toString() });
return fsck.getPathNameWarning();
}
}
}

View File

@ -21,6 +21,10 @@ package org.apache.hadoop.fs.azure;
import java.io.IOException;
import org.junit.Ignore;
/**
* Run {@link NativeAzureFileSystemBaseTest} tests against a mocked store,
* skipping tests of unsupported features.
*/
public class TestNativeAzureFileSystemMocked extends
NativeAzureFileSystemBaseTest {

View File

@ -18,41 +18,27 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests for the upload, buffering and flush logic in WASB.
*/
public class TestNativeAzureFileSystemUploadLogic {
private AzureBlobStorageTestAccount testAccount;
public class TestNativeAzureFileSystemUploadLogic extends AbstractWasbTestBase {
// Just an arbitrary number so that the values I write have a predictable
// pattern: 0, 1, 2, .. , 45, 46, 0, 1, 2, ...
static final int byteValuePeriod = 47;
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.createMock();
}
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.createMock();
}
/**
@ -126,9 +112,9 @@ public class TestNativeAzureFileSystemUploadLogic {
* @param expectedSize The expected size of the data in there.
*/
private void assertDataInFile(Path file, int expectedSize) throws Exception {
InputStream inStream = testAccount.getFileSystem().open(file);
assertDataInStream(inStream, expectedSize);
inStream.close();
try(InputStream inStream = getFileSystem().open(file)) {
assertDataInStream(inStream, expectedSize);
}
}
/**
@ -139,7 +125,7 @@ public class TestNativeAzureFileSystemUploadLogic {
private void assertDataInTempBlob(int expectedSize) throws Exception {
// Look for the temporary upload blob in the backing store.
InMemoryBlockBlobStore backingStore =
testAccount.getMockStorage().getBackingStore();
getTestAccount().getMockStorage().getBackingStore();
String tempKey = null;
for (String key : backingStore.getKeys()) {
if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) {
@ -149,9 +135,10 @@ public class TestNativeAzureFileSystemUploadLogic {
}
}
assertNotNull(tempKey);
InputStream inStream = new ByteArrayInputStream(backingStore.getContent(tempKey));
assertDataInStream(inStream, expectedSize);
inStream.close();
try (InputStream inStream = new ByteArrayInputStream(
backingStore.getContent(tempKey))) {
assertDataInStream(inStream, expectedSize);
}
}
/**
@ -162,25 +149,30 @@ public class TestNativeAzureFileSystemUploadLogic {
*/
private void testConsistencyAfterManyFlushes(FlushFrequencyVariation variation)
throws Exception {
Path uploadedFile = new Path("/uploadedFile");
OutputStream outStream = testAccount.getFileSystem().create(uploadedFile);
final int totalSize = 9123;
int flushPeriod;
switch (variation) {
case BeforeSingleBufferFull: flushPeriod = 300; break;
case AfterSingleBufferFull: flushPeriod = 600; break;
case AfterAllRingBufferFull: flushPeriod = 1600; break;
default:
throw new IllegalArgumentException("Unknown variation: " + variation);
}
for (int i = 0; i < totalSize; i++) {
outStream.write(i % byteValuePeriod);
if ((i + 1) % flushPeriod == 0) {
outStream.flush();
assertDataInTempBlob(i + 1);
Path uploadedFile = methodPath();
try {
OutputStream outStream = getFileSystem().create(uploadedFile);
final int totalSize = 9123;
int flushPeriod;
switch (variation) {
case BeforeSingleBufferFull: flushPeriod = 300; break;
case AfterSingleBufferFull: flushPeriod = 600; break;
case AfterAllRingBufferFull: flushPeriod = 1600; break;
default:
throw new IllegalArgumentException("Unknown variation: " + variation);
}
for (int i = 0; i < totalSize; i++) {
outStream.write(i % byteValuePeriod);
if ((i + 1) % flushPeriod == 0) {
outStream.flush();
assertDataInTempBlob(i + 1);
}
}
outStream.close();
assertDataInFile(uploadedFile, totalSize);
} finally {
getFileSystem().delete(uploadedFile, false);
}
outStream.close();
assertDataInFile(uploadedFile, totalSize);
}
}

View File

@ -18,11 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import org.apache.hadoop.fs.FileStatus;
@ -37,7 +32,8 @@ import org.junit.Test;
* Tests that WASB handles things gracefully when users add blobs to the Azure
* Storage container from outside WASB's control.
*/
public class TestOutOfBandAzureBlobOperations {
public class TestOutOfBandAzureBlobOperations
extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;

View File

@ -19,20 +19,23 @@
package org.apache.hadoop.fs.azure;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
import static org.junit.Assert.assertEquals;
import java.io.File;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestShellDecryptionKeyProvider {
public static final Log LOG = LogFactory
.getLog(TestShellDecryptionKeyProvider.class);
/**
* Windows-only tests of shell scripts to provide decryption keys.
*/
public class TestShellDecryptionKeyProvider
extends AbstractWasbTestWithTimeout {
public static final Logger LOG = LoggerFactory
.getLogger(TestShellDecryptionKeyProvider.class);
private static File TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");

View File

@ -18,10 +18,6 @@
package org.apache.hadoop.fs.azure;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@ -32,7 +28,10 @@ import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
public class TestWasbFsck {
/**
* Tests which look at fsck recovery.
*/
public class TestWasbFsck extends AbstractWasbTestWithTimeout {
private AzureBlobStorageTestAccount testAccount;
private FileSystem fs;
private InMemoryBlockBlobStore backingStore;

View File

@ -21,10 +21,14 @@ package org.apache.hadoop.fs.azure.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.Test;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
public class TestAzureNativeContractAppend extends AbstractContractAppendTest {
/**
* Append test, skipping one of them.
*/
public class ITestAzureNativeContractAppend extends AbstractContractAppendTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
@ -34,4 +38,4 @@ public class TestAzureNativeContractAppend extends AbstractContractAppendTest {
public void testRenameFileBeingAppended() throws Throwable {
skip("Skipping as renaming an opened file is not supported");
}
}
}

View File

@ -22,7 +22,11 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractCreate extends AbstractContractCreateTest{
/**
* Contract test.
*/
public class ITestAzureNativeContractCreate extends AbstractContractCreateTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);

View File

@ -22,9 +22,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractDelete extends AbstractContractDeleteTest {
/**
* Contract test.
*/
public class ITestAzureNativeContractDelete extends AbstractContractDeleteTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
}

View File

@ -19,15 +19,29 @@
package org.apache.hadoop.fs.azure.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled;
/**
* Contract test suite covering WASB integration with DistCp.
*/
public class TestAzureNativeContractDistCp extends AbstractContractDistCpTest {
public class ITestAzureNativeContractDistCp extends AbstractContractDistCpTest {
@Override
protected int getTestTimeoutMillis() {
return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
}
@Override
protected NativeAzureFileSystemContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
@Override
public void setup() throws Exception {
super.setup();
assumeScaleTestsEnabled(getContract().getConf());
}
}

View File

@ -22,7 +22,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractGetFileStatus extends AbstractContractGetFileStatusTest {
/**
* Contract test.
*/
public class ITestAzureNativeContractGetFileStatus
extends AbstractContractGetFileStatusTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);

View File

@ -22,9 +22,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractMkdir extends AbstractContractMkdirTest {
/**
* Contract test.
*/
public class ITestAzureNativeContractMkdir extends AbstractContractMkdirTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
}

View File

@ -22,9 +22,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractOpen extends AbstractContractOpenTest {
/**
* Contract test.
*/
public class ITestAzureNativeContractOpen extends AbstractContractOpenTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
}

View File

@ -22,9 +22,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractRename extends AbstractContractRenameTest {
/**
* Contract test.
*/
public class ITestAzureNativeContractRename extends AbstractContractRenameTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
}

View File

@ -22,9 +22,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
public class TestAzureNativeContractSeek extends AbstractContractSeekTest{
/**
* Contract test.
*/
public class ITestAzureNativeContractSeek extends AbstractContractSeekTest {
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new NativeAzureFileSystemContract(conf);
}
}
}

View File

@ -18,15 +18,21 @@
package org.apache.hadoop.fs.azure.contract;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
/**
* Azure Contract. Test paths are created using any maven fork
* identifier, if defined. This guarantees paths unique to tests
* running in parallel.
*/
public class NativeAzureFileSystemContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "wasb.xml";
protected NativeAzureFileSystemContract(Configuration conf) {
super(conf);
//insert the base features
public NativeAzureFileSystemContract(Configuration conf) {
super(conf); //insert the base features
addConfResource(CONTRACT_XML);
}
@ -34,4 +40,9 @@ public class NativeAzureFileSystemContract extends AbstractBondedFSContract {
public String getScheme() {
return "wasb";
}
}
@Override
public Path getTestPath() {
return AzureTestUtils.createTestPath(super.getTestPath());
}
}

View File

@ -0,0 +1,66 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.integration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
/**
* Scale tests are only executed if the scale profile
* is set; the setup method will check this and skip
* tests if not.
*
*/
public abstract class AbstractAzureScaleTest
extends AbstractWasbTestBase implements Sizes {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractAzureScaleTest.class);
@Override
protected int getTestTimeoutMillis() {
return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS;
}
@Override
public void setUp() throws Exception {
super.setUp();
LOG.debug("Scale test operation count = {}", getOperationCount());
assumeScaleTestsEnabled(getConfiguration());
}
/**
* Create the test account.
* @return a test account
* @throws Exception on any failure to create the account.
*/
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create(createConfiguration());
}
protected long getOperationCount() {
return getConfiguration().getLong(KEY_OPERATION_COUNT,
DEFAULT_OPERATION_COUNT);
}
}
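
As an editor's illustration (not part of this patch), a concrete scale test built on this base class could look like the sketch below. The class name and test body are invented; path(), getFileSystem() and getOperationCount() come from the base classes in this patch, and the suite is skipped unless the fs.azure.scale.test.enabled property resolves to true.

package org.apache.hadoop.fs.azure.integration;

import org.apache.hadoop.fs.Path;
import org.junit.Test;

/** Hypothetical scale test: one small file per configured operation. */
public class ITestExampleScale extends AbstractAzureScaleTest {
  @Test
  public void testManyCreates() throws Exception {
    Path dir = path("testManyCreates");
    getFileSystem().mkdirs(dir);
    for (long i = 0; i < getOperationCount(); i++) {
      // one empty blob per operation; close() commits it
      getFileSystem().create(new Path(dir, "file-" + i)).close();
    }
  }
}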

View File

@ -0,0 +1,180 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.integration;
import org.apache.hadoop.fs.Path;
/**
* Constants for the Azure tests.
*/
public interface AzureTestConstants {
/**
* Prefix for any cross-filesystem scale test options.
*/
String SCALE_TEST = "scale.test.";
/**
* Prefix for wasb-specific scale tests.
*/
String AZURE_SCALE_TEST = "fs.azure.scale.test.";
/**
* Prefix for FS wasb tests.
*/
String TEST_FS_WASB = "test.fs.azure.";
/**
* Name of the test filesystem.
*/
String TEST_FS_WASB_NAME = TEST_FS_WASB + "name";
/**
* Tell tests that they are being executed in parallel: {@value}.
*/
String KEY_PARALLEL_TEST_EXECUTION = "test.parallel.execution";
/**
* A property set to true in maven if scale tests are enabled: {@value}.
*/
String KEY_SCALE_TESTS_ENABLED = AZURE_SCALE_TEST + "enabled";
/**
* The number of operations to perform: {@value}.
*/
String KEY_OPERATION_COUNT = SCALE_TEST + "operation.count";
/**
* The number of directory operations to perform: {@value}.
*/
String KEY_DIRECTORY_COUNT = SCALE_TEST + "directory.count";
/**
* The readahead buffer: {@value}.
*/
String KEY_READ_BUFFER_SIZE = AZURE_SCALE_TEST + "read.buffer.size";
int DEFAULT_READ_BUFFER_SIZE = 16384;
/**
* Key for a multi MB test file: {@value}.
*/
String KEY_CSVTEST_FILE = AZURE_SCALE_TEST + "csvfile";
/**
* Default path for the multi MB test file: {@value}.
*/
String DEFAULT_CSVTEST_FILE = "wasb://datasets@azuremlsampleexperiments.blob.core.windows.net/network_intrusion_detection.csv";
/**
* Name of the property to define the timeout for scale tests: {@value}.
* Measured in seconds.
*/
String KEY_TEST_TIMEOUT = AZURE_SCALE_TEST + "timeout";
/**
* Name of the property to define the file size for the huge file
* tests: {@value}.
* Measured in KB; a suffix like "M", or "G" will change the unit.
*/
String KEY_HUGE_FILESIZE = AZURE_SCALE_TEST + "huge.filesize";
/**
* Name of the property to define the partition size for the huge file
* tests: {@value}.
* Measured in KB; a suffix like "M", or "G" will change the unit.
*/
String KEY_HUGE_PARTITION_SIZE = AZURE_SCALE_TEST + "huge.partitionsize";
/**
* The default huge filesize is small; full 5GB+ scale tests are something
* to run in long test runs on EC2 VMs. {@value}.
*/
String DEFAULT_HUGE_FILESIZE = "10M";
/**
* The default number of operations to perform: {@value}.
*/
long DEFAULT_OPERATION_COUNT = 2005;
/**
* Default number of directories to create when performing
* directory performance/scale tests.
*/
int DEFAULT_DIRECTORY_COUNT = 2;
/**
* Default policy on scale tests: {@value}.
*/
boolean DEFAULT_SCALE_TESTS_ENABLED = false;
/**
* Fork ID passed down from maven if the test is running in parallel.
*/
String TEST_UNIQUE_FORK_ID = "test.unique.fork.id";
/**
* Timeout in Milliseconds for standard tests: {@value}.
*/
int AZURE_TEST_TIMEOUT = 10 * 60 * 1000;
/**
* Timeout in Seconds for Scale Tests: {@value}.
*/
int SCALE_TEST_TIMEOUT_SECONDS = 30 * 60;
int SCALE_TEST_TIMEOUT_MILLIS = SCALE_TEST_TIMEOUT_SECONDS * 1000;
String ACCOUNT_KEY_PROPERTY_NAME
= "fs.azure.account.key.";
String SAS_PROPERTY_NAME = "fs.azure.sas.";
String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
String TEST_ACCOUNT_NAME_PROPERTY_NAME
= "fs.azure.test.account.name";
String MOCK_ACCOUNT_NAME
= "mockAccount.blob.core.windows.net";
String MOCK_CONTAINER_NAME = "mockContainer";
String WASB_AUTHORITY_DELIMITER = "@";
String WASB_SCHEME = "wasb";
String PATH_DELIMITER = "/";
String AZURE_ROOT_CONTAINER = "$root";
String MOCK_WASB_URI = "wasb://" + MOCK_CONTAINER_NAME
+ WASB_AUTHORITY_DELIMITER + MOCK_ACCOUNT_NAME + "/";
String USE_EMULATOR_PROPERTY_NAME
= "fs.azure.test.emulator";
String KEY_DISABLE_THROTTLING
= "fs.azure.disable.bandwidth.throttling";
String KEY_READ_TOLERATE_CONCURRENT_APPEND
= "fs.azure.io.read.tolerate.concurrent.append";
/**
* Path for page blobs: {@value}.
*/
String DEFAULT_PAGE_BLOB_DIRECTORY = "pageBlobs";
String DEFAULT_ATOMIC_RENAME_DIRECTORIES
= "/atomicRenameDir1,/atomicRenameDir2";
/**
* Base directory for page blobs.
*/
Path PAGE_BLOB_DIR = new Path("/" + DEFAULT_PAGE_BLOB_DIRECTORY);
}
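
A hedged sketch (editor's addition, not part of the patch) of how these constants combine with the property-resolution helpers defined later in this patch; the wrapper class and main method are hypothetical scaffolding:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
import org.apache.hadoop.fs.azure.integration.AzureTestUtils;

public class ScaleFlagExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // resolves "fs.azure.scale.test.enabled"; a -D system property on the
    // maven command line overrides the configuration unless set to "unset"
    boolean scaleEnabled = AzureTestUtils.getTestPropertyBool(conf,
        AzureTestConstants.KEY_SCALE_TESTS_ENABLED,
        AzureTestConstants.DEFAULT_SCALE_TESTS_ENABLED);
    System.out.println("scale tests enabled: " + scaleEnabled);
  }
}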

View File

@ -0,0 +1,479 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.integration;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.internal.AssumptionViolatedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
import static org.apache.hadoop.fs.azure.integration.AzureTestConstants.*;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
/**
* Utilities for the Azure tests. Based on {@code S3ATestUtils}, so
* (initially) has unused methods.
*/
public final class AzureTestUtils extends Assert {
private static final Logger LOG = LoggerFactory.getLogger(
AzureTestUtils.class);
/**
* Value to set a system property to (in maven) to declare that
* a property has been unset.
*/
public static final String UNSET_PROPERTY = "unset";
/**
* Create the test filesystem.
*
* If the test.fs.azure.name property is not set, this will
* raise a JUnit assumption exception.
*
* @param conf configuration
* @return the FS
* @throws IOException IO Problems
* @throws AssumptionViolatedException if the FS is not named
*/
public static NativeAzureFileSystem createTestFileSystem(Configuration conf)
throws IOException {
String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
boolean liveTest = !StringUtils.isEmpty(fsname);
URI testURI = null;
if (liveTest) {
testURI = URI.create(fsname);
liveTest = testURI.getScheme().equals(WASB_SCHEME);
}
if (!liveTest) {
// Skip the test
throw new AssumptionViolatedException(
"No test filesystem in " + TEST_FS_WASB_NAME);
}
NativeAzureFileSystem fs1 = new NativeAzureFileSystem();
fs1.initialize(testURI, conf);
return fs1;
}
/**
* Create a file context for tests.
*
* If the test.fs.azure.name property is not set, this will
* raise a JUnit assumption exception.
*
* @param conf configuration
* @return the FS
* @throws IOException IO Problems
* @throws AssumptionViolatedException if the FS is not named
*/
public static FileContext createTestFileContext(Configuration conf)
throws IOException {
String fsname = conf.getTrimmed(TEST_FS_WASB_NAME, "");
boolean liveTest = !StringUtils.isEmpty(fsname);
URI testURI = null;
if (liveTest) {
testURI = URI.create(fsname);
liveTest = testURI.getScheme().equals(WASB_SCHEME);
}
if (!liveTest) {
// This doesn't work with our JUnit 3 style test cases, so instead we'll
// make this whole class not run by default
throw new AssumptionViolatedException("No test filesystem in "
+ TEST_FS_WASB_NAME);
}
FileContext fc = FileContext.getFileContext(testURI, conf);
return fc;
}
/**
* Get a long test property.
* <ol>
* <li>Look up configuration value (which can pick up core-default.xml),
* using {@code defVal} as the default value (if conf != null).
* </li>
* <li>Fetch the system property.</li>
* <li>If the system property is not empty or "(unset)":
* it overrides the conf value.
* </li>
* </ol>
* This puts the build properties in charge of everything. It's not a
* perfect design; having maven set properties based on a file, as ant let
* you do, is better for customization.
*
* As to why there's a special (unset) value, see
* <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
* Stack Overflow</a>.
* @param conf config: may be null
* @param key key to look up
* @param defVal default value
* @return the evaluated test property.
*/
public static long getTestPropertyLong(Configuration conf,
String key, long defVal) {
return Long.valueOf(
getTestProperty(conf, key, Long.toString(defVal)));
}
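// Editor's illustration (not part of the patch): if the configuration sets
// scale.test.operation.count=50 but the build passes
// -Dscale.test.operation.count=1000, then
// getTestPropertyLong(conf, KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT)
// returns 1000; the system property wins unless its value is "unset".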
/**
* Get a test property value in bytes, using k, m, g, t, p, e suffixes.
* See {@link org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix#string2long(String)}.
* <ol>
* <li>Look up configuration value (which can pick up core-default.xml),
* using {@code defVal} as the default value (if conf != null).
* </li>
* <li>Fetch the system property.</li>
* <li>If the system property is not empty or "(unset)":
* it overrides the conf value.
* </li>
* </ol>
* This puts the build properties in charge of everything. It's not a
* perfect design; having maven set properties based on a file, as ant let
* you do, is better for customization.
*
* As to why there's a special (unset) value, see
* <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
* Stack Overflow</a>.
* @param conf config: may be null
* @param key key to look up
* @param defVal default value
* @return the evaluated test property.
*/
public static long getTestPropertyBytes(Configuration conf,
String key, String defVal) {
return org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix
.string2long(getTestProperty(conf, key, defVal));
}
/**
* Get an integer test property; algorithm described in
* {@link #getTestPropertyLong(Configuration, String, long)}.
* @param key key to look up
* @param defVal default value
* @return the evaluated test property.
*/
public static int getTestPropertyInt(Configuration conf,
String key, int defVal) {
return (int) getTestPropertyLong(conf, key, defVal);
}
/**
* Get a boolean test property; algorithm described in
* {@link #getTestPropertyLong(Configuration, String, long)}.
* @param key key to look up
* @param defVal default value
* @return the evaluated test property.
*/
public static boolean getTestPropertyBool(Configuration conf,
String key,
boolean defVal) {
return Boolean.valueOf(
getTestProperty(conf, key, Boolean.toString(defVal)));
}
/**
* Get a string test property.
* <ol>
* <li>Look up configuration value (which can pick up core-default.xml),
* using {@code defVal} as the default value (if conf != null).
* </li>
* <li>Fetch the system property.</li>
* <li>If the system property is not empty or "(unset)":
* it overrides the conf value.
* </li>
* </ol>
* This puts the build properties in charge of everything. It's not a
* perfect design; having maven set properties based on a file, as ant let
* you do, is better for customization.
*
* As to why there's a special (unset) value, see
* <a href="http://stackoverflow.com/questions/7773134/null-versus-empty-arguments-in-maven">
* Stack Overflow</a>.
* @param conf config: may be null
* @param key key to look up
* @param defVal default value
* @return the evaluated test property.
*/
public static String getTestProperty(Configuration conf,
String key,
String defVal) {
String confVal = conf != null
? conf.getTrimmed(key, defVal)
: defVal;
String propval = System.getProperty(key);
return StringUtils.isNotEmpty(propval) && !UNSET_PROPERTY.equals(propval)
? propval : confVal;
}
/**
* Verify the class of an exception. If it is not as expected, rethrow it.
* Comparison is on the exact class, not subclass-of inference as
* offered by {@code instanceof}.
* @param clazz the expected exception class
* @param ex the exception caught
* @return the exception, if it is of the expected class
* @throws Exception the exception passed in.
*/
public static Exception verifyExceptionClass(Class clazz,
Exception ex)
throws Exception {
if (!(ex.getClass().equals(clazz))) {
throw ex;
}
return ex;
}
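// Editor's illustration of the intended use; fs and missingPath are
// hypothetical names:
//   try {
//     fs.getFileStatus(missingPath);
//     fail("expected an exception");
//   } catch (IOException e) {
//     verifyExceptionClass(FileNotFoundException.class, e);
//   }
// An exception of any other exact class is rethrown to fail the test.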
/**
* Turn off FS Caching: use if a filesystem with different options from
* the default is required.
* @param conf configuration to patch
*/
public static void disableFilesystemCaching(Configuration conf) {
conf.setBoolean("fs.wasb.impl.disable.cache", true);
}
/**
* Create a test path, using the value of
* {@link AzureTestUtils#TEST_UNIQUE_FORK_ID} if it is set.
* @param defVal default value
* @return a path
*/
public static Path createTestPath(Path defVal) {
String testUniqueForkId = System.getProperty(
AzureTestConstants.TEST_UNIQUE_FORK_ID);
return testUniqueForkId == null
? defVal
: new Path("/" + testUniqueForkId, "test");
}
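// Editor's illustration: with -Dtest.unique.fork.id=fork-0001 (an example
// value a parallel build might set), createTestPath(new Path("/wasbtests"))
// returns "/fork-0001/test"; without the property the default is unchanged.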
/**
* Create a test page blob path using the value of
* {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
* @param filename filename at the end of the path
* @return an absolute path
*/
public static Path blobPathForTests(FileSystem fs, String filename) {
String testUniqueForkId = System.getProperty(
AzureTestConstants.TEST_UNIQUE_FORK_ID);
return fs.makeQualified(new Path(PAGE_BLOB_DIR,
testUniqueForkId == null
? filename
: (testUniqueForkId + "/" + filename)));
}
/**
* Create a test path using the value of
* {@link AzureTestConstants#TEST_UNIQUE_FORK_ID} if it is set.
* @param filename filename at the end of the path
* @return an absolute path
*/
public static Path pathForTests(FileSystem fs, String filename) {
String testUniqueForkId = System.getProperty(
AzureTestConstants.TEST_UNIQUE_FORK_ID);
return fs.makeQualified(new Path(
testUniqueForkId == null
? ("/test/" + filename)
: (testUniqueForkId + "/" + filename)));
}
/**
* Get a unique fork ID.
* Returns a default value for non-parallel tests.
* @return a string unique for all test VMs running in this maven build.
*/
public static String getForkID() {
return System.getProperty(
AzureTestConstants.TEST_UNIQUE_FORK_ID, "fork-1");
}
/**
* Flag to indicate that this test is being executed in parallel.
* This is used by some of the scale tests to validate test time expectations.
* @return true if the build indicates this test is being run in parallel.
*/
public static boolean isParallelExecution() {
return Boolean.getBoolean(KEY_PARALLEL_TEST_EXECUTION);
}
/**
* Asserts that {@code obj} is an instance of {@code expectedClass} using a
* descriptive assertion message.
* @param expectedClass class
* @param obj object to check
*/
public static void assertInstanceOf(Class<?> expectedClass, Object obj) {
Assert.assertTrue(String.format("Expected instance of class %s, but is %s.",
expectedClass, obj.getClass()),
expectedClass.isAssignableFrom(obj.getClass()));
}
/**
* Builds a comma-separated list of class names.
* @param classes list of classes
* @return comma-separated list of class names
*/
public static <T extends Class<?>> String buildClassListString(
List<T> classes) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < classes.size(); ++i) {
if (i > 0) {
sb.append(',');
}
sb.append(classes.get(i).getName());
}
return sb.toString();
}
/**
* This class should not be instantiated.
*/
private AzureTestUtils() {
}
/**
* Assert that a configuration option matches the expected value.
* @param conf configuration
* @param key option key
* @param expected expected value
*/
public static void assertOptionEquals(Configuration conf,
String key,
String expected) {
assertEquals("Value of " + key, expected, conf.get(key));
}
/**
* Assume that a condition is met. If not: log at WARN and
* then throw an {@link AssumptionViolatedException}.
* @param message message in an assumption
* @param condition condition to probe
*/
public static void assume(String message, boolean condition) {
if (!condition) {
LOG.warn(message);
}
Assume.assumeTrue(message, condition);
}
/**
* Gets the current value of the given gauge.
* @param fs filesystem
* @param gaugeName gauge name
* @return the gauge value
*/
public static long getLongGaugeValue(NativeAzureFileSystem fs,
String gaugeName) {
return getLongGauge(gaugeName, getMetrics(fs.getInstrumentation()));
}
/**
* Gets the current value of the given counter.
* @param fs filesystem
* @param counterName counter name
* @return the counter value
*/
public static long getLongCounterValue(NativeAzureFileSystem fs,
String counterName) {
return getLongCounter(counterName, getMetrics(fs.getInstrumentation()));
}
/**
* Delete a path, catching any exception and downgrading to a log message.
* @param fs filesystem
* @param path path to delete
* @param recursive recursive delete?
* @throws IOException IO failure.
*/
public static void deleteQuietly(FileSystem fs,
Path path,
boolean recursive) throws IOException {
if (fs != null && path != null) {
try {
fs.delete(path, recursive);
} catch (IOException e) {
LOG.warn("When deleting {}", path, e);
}
}
}
/**
* Clean up the test account if non-null; return null so the caller
* can reset the field.
* @param testAccount test account to clean up
* @return null
* @throws Exception cleanup problems
*/
public static AzureBlobStorageTestAccount cleanup(
AzureBlobStorageTestAccount testAccount) throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
}
return null;
}
/**
* Clean up the test account; any thrown exceptions are caught and
* logged.
* @param testAccount test account
* @return null, so that any fields can be reset.
*/
public static AzureBlobStorageTestAccount cleanupTestAccount(
AzureBlobStorageTestAccount testAccount) {
if (testAccount != null) {
try {
testAccount.cleanup();
} catch (Exception e) {
LOG.error("While cleaning up test account: ", e);
}
}
return null;
}
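// Editor's illustration: the null return lets teardown code clean up and
// reset a field in one statement:
//   testAccount = AzureTestUtils.cleanupTestAccount(testAccount);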
/**
* Assume that the scale tests are enabled by the relevant system property.
*/
public static void assumeScaleTestsEnabled(Configuration conf) {
boolean enabled = getTestPropertyBool(
conf,
KEY_SCALE_TESTS_ENABLED,
DEFAULT_SCALE_TESTS_ENABLED);
assume("Scale test disabled: to enable set property "
+ KEY_SCALE_TESTS_ENABLED,
enabled);
}
}

View File

@ -0,0 +1,87 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.integration;
import java.util.EnumSet;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.blob.CloudBlobContainer;
import org.junit.Test;
import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
/**
* This looks like a test, but it is really a command to invoke to
* clean up containers created in other test runs.
*
*/
public class CleanupTestContainers extends AbstractWasbTestBase {
private static final String CONTAINER_PREFIX = "wasbtests-";
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create(
"CleanupTestContainers",
EnumSet.noneOf(AzureBlobStorageTestAccount.CreateOptions.class),
createConfiguration(),
true);
}
@Test
public void testEnumContainers() throws Throwable {
describe("Enumerating all the WASB test containers");
int count = 0;
CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
Iterable<CloudBlobContainer> containers
= blobClient.listContainers(CONTAINER_PREFIX);
for (CloudBlobContainer container : containers) {
count++;
LOG.info("Container {} URI {}",
container.getName(),
container.getUri());
}
LOG.info("Found {} test containers", count);
}
@Test
public void testDeleteContainers() throws Throwable {
describe("Delete all the WASB test containers");
int count = 0;
CloudStorageAccount storageAccount = getTestAccount().getRealAccount();
CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
Iterable<CloudBlobContainer> containers
= blobClient.listContainers(CONTAINER_PREFIX);
for (CloudBlobContainer container : containers) {
LOG.info("Container {} URI {}",
container.getName(),
container.getUri());
if (container.deleteIfExists()) {
count++;
}
}
LOG.info("Deleted {} test containers", count);
}
}

View File

@ -0,0 +1,456 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.integration;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Iterator;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*;
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
/**
* Scale test which creates a huge file.
*
* <b>Important:</b> the order in which these tests execute is fixed to
* alphabetical order. Test cases are numbered {@code test_123_} to impose
* an ordering based on the numbers.
*
* Having this ordering allows the tests to assume that the huge file
* exists. Even so: they should all have a {@link #assumeHugeFileExists()}
* check at the start, in case an individual test is executed.
*
* <b>Ignore checkstyle complaints about naming: we need a scheme with visible
* ordering.</b>
*/
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ITestAzureHugeFiles extends AbstractAzureScaleTest {
private static final Logger LOG = LoggerFactory.getLogger(
ITestAzureHugeFiles.class);
private Path scaleTestDir;
private Path hugefile;
private Path hugefileRenamed;
private AzureBlobStorageTestAccount testAccountForCleanup;
private static final int UPLOAD_BLOCKSIZE = 64 * S_1K;
private static final byte[] SOURCE_DATA;
static {
SOURCE_DATA = dataset(UPLOAD_BLOCKSIZE, 0, S_256);
}
private Path testPath;
@Override
public void setUp() throws Exception {
super.setUp();
testPath = path("ITestAzureHugeFiles");
scaleTestDir = new Path(testPath, "scale");
hugefile = new Path(scaleTestDir, "hugefile");
hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
}
/**
* Only clean up the test account (and delete the container) if the account
* is set in the field {@code testAccountForCleanup}.
* @throws Exception
*/
@Override
public void tearDown() throws Exception {
testAccount = null;
super.tearDown();
if (testAccountForCleanup != null) {
cleanupTestAccount(testAccountForCleanup);
}
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create(
"testazurehugefiles",
EnumSet.of(AzureBlobStorageTestAccount.CreateOptions.CreateContainer),
createConfiguration(),
true);
}
/**
* Stop the test-case teardown from deleting the test path.
* @throws IOException never
*/
protected void deleteTestDirInTeardown() throws IOException {
// this is a no-op, so the test file is preserved.
// the last test in the suite does the teardown
}
protected void deleteHugeFile() throws IOException {
describe("Deleting %s", hugefile);
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
getFileSystem().delete(hugefile, false);
timer.end("time to delete %s", hugefile);
}
/**
* Log how long an IOP took, by dividing the total time by the
* count of operations, printing in a human-readable form.
* @param operation operation being measured
* @param timer timing data
* @param count IOP count.
*/
protected void logTimePerIOP(String operation,
ContractTestUtils.NanoTimer timer,
long count) {
LOG.info("Time per {}: {} nS",
operation, toHuman(timer.duration() / count));
}
/**
* Assume that the huge file exists, skip if not/empty.
* @return the file status
* @throws IOException IO failure
*/
FileStatus assumeHugeFileExists() throws IOException {
assertPathExists(getFileSystem(), "huge file not created", hugefile);
try {
FileStatus status = getFileSystem().getFileStatus(hugefile);
Assume.assumeTrue("Not a file: " + status, status.isFile());
Assume.assumeTrue("File " + hugefile + " is empty", status.getLen() > 0);
return status;
} catch (FileNotFoundException e) {
skip("huge file not created: " + hugefile);
}
return null;
}
/**
* If/when {@link NativeAzureFileSystem#getStorageStatistics()} returns
* statistics, this will be interesting.
*/
private void logFSState() {
StorageStatistics statistics = getFileSystem().getStorageStatistics();
Iterator<StorageStatistics.LongStatistic> longStatistics
= statistics.getLongStatistics();
while (longStatistics.hasNext()) {
StorageStatistics.LongStatistic next = longStatistics.next();
LOG.info("{} = {}", next.getName(), next.getValue());
}
}
@Test
public void test_010_CreateHugeFile() throws IOException {
long filesize = getTestPropertyBytes(getConfiguration(),
KEY_HUGE_FILESIZE,
DEFAULT_HUGE_FILESIZE);
long filesizeMB = filesize / S_1M;
// clean up from any previous attempts
deleteHugeFile();
describe("Creating file %s of size %d MB", hugefile, filesizeMB);
// now do a check of available upload time, with a pessimistic bandwidth
// (that of remote upload tests). If the test times out then not only is
// the test outcome lost; as the follow-on tests continue, they will
// overlap with the ongoing upload, causing much confusion.
/*
int timeout = getTestTimeoutSeconds();
// assume 1 MB/s upload bandwidth
int bandwidth = _1MB;
long uploadTime = filesize / bandwidth;
assertTrue(String.format("Timeout set in %s seconds is too low;" +
" estimating upload time of %d seconds at 1 MB/s." +
" Rerun tests with -D%s=%d",
timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
uploadTime < timeout);
*/
assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize
+ " is not a multiple of " + UPLOAD_BLOCKSIZE,
0, filesize % UPLOAD_BLOCKSIZE);
byte[] data = SOURCE_DATA;
long blocks = filesize / UPLOAD_BLOCKSIZE;
long blocksPerMB = S_1M / UPLOAD_BLOCKSIZE;
// perform the upload.
// there's lots of logging here, so that a tail -f on the output log
// can give a view of what is happening.
NativeAzureFileSystem fs = getFileSystem();
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
long blocksPer10MB = blocksPerMB * 10;
fs.mkdirs(hugefile.getParent());
try (FSDataOutputStream out = fs.create(hugefile,
true,
UPLOAD_BLOCKSIZE,
null)) {
for (long block = 1; block <= blocks; block++) {
out.write(data);
long written = block * UPLOAD_BLOCKSIZE;
// every 10 MB and on file upload @ 100%, print some stats
if (block % blocksPer10MB == 0 || written == filesize) {
long percentage = written * 100 / filesize;
double elapsedTime = timer.elapsedTime() / NANOSEC;
double writtenMB = 1.0 * written / S_1M;
LOG.info(String.format("[%02d%%] Buffered %.2f MB out of %d MB;"
+ " elapsedTime=%.2fs; write to buffer bandwidth=%.2f MB/s",
percentage,
writtenMB,
filesizeMB,
elapsedTime,
writtenMB / elapsedTime));
}
}
// now close the file
LOG.info("Closing stream {}", out);
ContractTestUtils.NanoTimer closeTimer
= new ContractTestUtils.NanoTimer();
out.close();
closeTimer.end("time to close() output stream");
}
timer.end("time to write %d MB in blocks of %d",
filesizeMB, UPLOAD_BLOCKSIZE);
logFSState();
bandwidth(timer, filesize);
ContractTestUtils.assertPathExists(fs, "Huge file", hugefile);
FileStatus status = fs.getFileStatus(hugefile);
ContractTestUtils.assertIsFile(hugefile, status);
assertEquals("File size in " + status, filesize, status.getLen());
}
@Test
public void test_040_PositionedReadHugeFile() throws Throwable {
assumeHugeFileExists();
describe("Positioned reads of file %s", hugefile);
NativeAzureFileSystem fs = getFileSystem();
FileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
int ops = 0;
final int bufferSize = 8192;
byte[] buffer = new byte[bufferSize];
long eof = filesize - 1;
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
ContractTestUtils.NanoTimer readAtByte0, readAtByte0Again, readAtEOF;
try (FSDataInputStream in = openDataFile()) {
readAtByte0 = new ContractTestUtils.NanoTimer();
in.readFully(0, buffer);
readAtByte0.end("time to read data at start of file");
ops++;
readAtEOF = new ContractTestUtils.NanoTimer();
in.readFully(eof - bufferSize, buffer);
readAtEOF.end("time to read data at end of file");
ops++;
readAtByte0Again = new ContractTestUtils.NanoTimer();
in.readFully(0, buffer);
readAtByte0Again.end("time to read data at start of file again");
ops++;
LOG.info("Final stream state: {}", in);
}
long mb = Math.max(filesize / S_1M, 1);
logFSState();
timer.end("time to performed positioned reads of %d MB ", mb);
LOG.info("Time per positioned read = {} nS",
toHuman(timer.nanosPerOperation(ops)));
}
protected FSDataInputStream openDataFile() throws IOException {
NanoTimer openTimer = new NanoTimer();
FSDataInputStream inputStream = getFileSystem().open(hugefile,
UPLOAD_BLOCKSIZE);
openTimer.end("open data file");
return inputStream;
}
/**
* Work out the bandwidth in bytes/second.
* @param timer timer measuring the duration
* @param bytes bytes
* @return the number of bytes/second of the recorded operation
*/
public static double bandwidthInBytes(NanoTimer timer, long bytes) {
return bytes * NANOSEC / timer.duration();
}
@Test
public void test_050_readHugeFile() throws Throwable {
assumeHugeFileExists();
describe("Reading %s", hugefile);
NativeAzureFileSystem fs = getFileSystem();
FileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
long blocks = filesize / UPLOAD_BLOCKSIZE;
byte[] data = new byte[UPLOAD_BLOCKSIZE];
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
try (FSDataInputStream in = openDataFile()) {
for (long block = 0; block < blocks; block++) {
in.readFully(data);
}
LOG.info("Final stream state: {}", in);
}
long mb = Math.max(filesize / S_1M, 1);
timer.end("time to read file of %d MB ", mb);
LOG.info("Time per MB to read = {} nS",
toHuman(timer.nanosPerOperation(mb)));
bandwidth(timer, filesize);
logFSState();
}
@Test
public void test_060_openAndReadWholeFileBlocks() throws Throwable {
FileStatus status = assumeHugeFileExists();
int blockSize = S_1M;
describe("Open the test file and read it in blocks of size %d",
blockSize);
long len = status.getLen();
FSDataInputStream in = openDataFile();
NanoTimer timer2 = null;
long blockCount = 0;
long totalToRead = 0;
int resetCount = 0;
try {
byte[] block = new byte[blockSize];
timer2 = new NanoTimer();
long count = 0;
// implicitly rounding down here
blockCount = len / blockSize;
totalToRead = blockCount * blockSize;
long minimumBandwidth = S_128K;
int maxResetCount = 4;
resetCount = 0;
for (long i = 0; i < blockCount; i++) {
int offset = 0;
int remaining = blockSize;
long blockId = i + 1;
NanoTimer blockTimer = new NanoTimer();
int reads = 0;
while (remaining > 0) {
NanoTimer readTimer = new NanoTimer();
int bytesRead = in.read(block, offset, remaining);
reads++;
if (bytesRead == -1) {
break;
}
remaining -= bytesRead;
offset += bytesRead;
count += bytesRead;
readTimer.end();
if (bytesRead != 0) {
LOG.debug("Bytes in read #{}: {} , block bytes: {},"
+ " remaining in block: {}"
+ " duration={} nS; ns/byte: {}, bandwidth={} MB/s",
reads, bytesRead, blockSize - remaining, remaining,
readTimer.duration(),
readTimer.nanosPerOperation(bytesRead),
readTimer.bandwidthDescription(bytesRead));
} else {
LOG.warn("0 bytes returned by read() operation #{}", reads);
}
}
blockTimer.end("Reading block %d in %d reads", blockId, reads);
String bw = blockTimer.bandwidthDescription(blockSize);
LOG.info("Bandwidth of block {}: {} MB/s: ", blockId, bw);
if (bandwidthInBytes(blockTimer, blockSize) < minimumBandwidth) {
LOG.warn("Bandwidth {} too low on block {}: resetting connection",
bw, blockId);
Assert.assertTrue("Bandwidth of " + bw + " too low after "
+ resetCount + " attempts", resetCount <= maxResetCount);
resetCount++;
// reset the connection
}
}
} finally {
IOUtils.closeStream(in);
}
timer2.end("Time to read %d bytes in %d blocks", totalToRead, blockCount);
LOG.info("Overall Bandwidth {} MB/s; reset connections {}",
timer2.bandwidth(totalToRead), resetCount);
}
@Test
public void test_100_renameHugeFile() throws Throwable {
assumeHugeFileExists();
describe("renaming %s to %s", hugefile, hugefileRenamed);
NativeAzureFileSystem fs = getFileSystem();
FileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
fs.delete(hugefileRenamed, false);
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
fs.rename(hugefile, hugefileRenamed);
long mb = Math.max(filesize / S_1M, 1);
timer.end("time to rename file of %d MB", mb);
LOG.info("Time per MB to rename = {} nS",
toHuman(timer.nanosPerOperation(mb)));
bandwidth(timer, filesize);
logFSState();
FileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
assertEquals(filesize, destFileStatus.getLen());
// rename back
ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
fs.rename(hugefileRenamed, hugefile);
timer2.end("Renaming back");
LOG.info("Time per MB to rename = {} nS",
toHuman(timer2.nanosPerOperation(mb)));
bandwidth(timer2, filesize);
}
@Test
public void test_999_deleteHugeFiles() throws IOException {
// mark the test account for cleanup after this test
testAccountForCleanup = testAccount;
deleteHugeFile();
ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
NativeAzureFileSystem fs = getFileSystem();
fs.delete(hugefileRenamed, false);
timer2.end("time to delete %s", hugefileRenamed);
rm(fs, testPath, true, false);
assertPathDoesNotExist(fs, "deleted huge file", testPath);
}
}

View File

@ -0,0 +1,43 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azure.integration;
/**
* Sizes of data.
* Checkstyle doesn't like the naming scheme or the fact it's an interface.
*/
public interface Sizes {
int S_256 = 256;
int S_512 = 512;
int S_1K = 1024;
int S_4K = 4 * S_1K;
int S_8K = 8 * S_1K;
int S_16K = 16 * S_1K;
int S_32K = 32 * S_1K;
int S_64K = 64 * S_1K;
int S_128K = 128 * S_1K;
int S_256K = 256 * S_1K;
int S_1M = S_1K * S_1K;
int S_2M = 2 * S_1M;
int S_5M = 5 * S_1M;
int S_10M = 10 * S_1M;
double NANOSEC = 1.0e9;
}

View File

@ -33,7 +33,6 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.verify;
@ -44,6 +43,7 @@ import java.util.Date;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AbstractWasbTestBase;
import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount;
import org.apache.hadoop.fs.azure.AzureException;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
@ -53,39 +53,31 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestAzureFileSystemInstrumentation {
private FileSystem fs;
private AzureBlobStorageTestAccount testAccount;
/**
* Instrumentation test, changing state over time and verifying metrics are
* consistent.
*/
public class ITestAzureFileSystemInstrumentation extends AbstractWasbTestBase {
@Before
public void setUp() throws Exception {
testAccount = AzureBlobStorageTestAccount.create();
if (testAccount != null) {
fs = testAccount.getFileSystem();
}
assumeNotNull(testAccount);
}
protected static final Logger LOG =
LoggerFactory.getLogger(ITestAzureFileSystemInstrumentation.class);
@After
public void tearDown() throws Exception {
if (testAccount != null) {
testAccount.cleanup();
testAccount = null;
fs = null;
}
@Override
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
return AzureBlobStorageTestAccount.create();
}
@Test
public void testMetricTags() throws Exception {
String accountName =
testAccount.getRealAccount().getBlobEndpoint()
getTestAccount().getRealAccount().getBlobEndpoint()
.getAuthority();
String containerName =
testAccount.getRealContainer().getName();
getTestAccount().getRealContainer().getName();
MetricsRecordBuilder myMetrics = getMyMetrics();
verify(myMetrics).add(argThat(
new TagMatcher("accountName", accountName)
@ -119,14 +111,14 @@ public class TestAzureFileSystemInstrumentation {
AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_DIRECTORIES_CREATED));
// List the root contents
assertEquals(1, fs.listStatus(new Path("/")).length);
assertEquals(1, getFileSystem().listStatus(new Path("/")).length);
base = assertWebResponsesEquals(base, 1);
assertNoErrors();
}
private BandwidthGaugeUpdater getBandwidthGaugeUpdater() {
NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
NativeAzureFileSystem azureFs = (NativeAzureFileSystem) getFileSystem();
AzureNativeFileSystemStore azureStore = azureFs.getStore();
return azureStore.getBandwidthGaugeUpdater();
}
@ -152,7 +144,7 @@ public class TestAzureFileSystemInstrumentation {
// Create a file
Date start = new Date();
OutputStream outputStream = fs.create(filePath);
OutputStream outputStream = getFileSystem().create(filePath);
outputStream.write(nonZeroByteArray(FILE_SIZE));
outputStream.close();
long uploadDurationMs = new Date().getTime() - start.getTime();
@ -177,7 +169,7 @@ public class TestAzureFileSystemInstrumentation {
" bytes plus a little overhead.",
totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
LOG.info("Upload rate: " + uploadRate + " bytes/second.");
long expectedRate = (FILE_SIZE * 1000L) / uploadDurationMs;
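// Worked example with illustrative figures (not the real constants):
// FILE_SIZE = 1000 bytes uploaded in 200 ms gives
// (1000 * 1000L) / 200 = 5000 bytes/second.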
assertTrue("The upload rate " + uploadRate +
" is below the expected range of around " + expectedRate +
@ -187,7 +179,7 @@ public class TestAzureFileSystemInstrumentation {
uploadRate >= expectedRate);
long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
LOG.info("Upload latency: {}", uploadLatency);
long expectedLatency = uploadDurationMs; // We're uploading less than a block.
assertTrue("The upload latency " + uploadLatency +
" should be greater than zero now that I've just uploaded a file.",
@ -201,7 +193,7 @@ public class TestAzureFileSystemInstrumentation {
// Read the file
start = new Date();
InputStream inputStream = fs.open(filePath);
InputStream inputStream = getFileSystem().open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
@ -224,7 +216,7 @@ public class TestAzureFileSystemInstrumentation {
" bytes plus a little overhead.",
bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
LOG.info("Download rate: " + downloadRate + " bytes/second.");
expectedRate = (FILE_SIZE * 1000L) / downloadDurationMs;
assertTrue("The download rate " + downloadRate +
" is below the expected range of around " + expectedRate +
@ -234,7 +226,7 @@ public class TestAzureFileSystemInstrumentation {
downloadRate >= expectedRate);
long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
LOG.info("Download latency: " + downloadLatency);
expectedLatency = downloadDurationMs; // We're downloading less than a block.
assertTrue("The download latency " + downloadLatency +
" should be greater than zero now that I've just downloaded a file.",
@ -263,7 +255,7 @@ public class TestAzureFileSystemInstrumentation {
getBandwidthGaugeUpdater().suppressAutoUpdate();
// Create a file
OutputStream outputStream = fs.create(filePath);
OutputStream outputStream = getFileSystem().create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
@ -282,16 +274,16 @@ public class TestAzureFileSystemInstrumentation {
" bytes plus a little overhead.",
totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
LOG.info("Upload rate: " + uploadRate + " bytes/second.");
long uploadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
LOG.info("Upload latency: " + uploadLatency);
assertTrue("The upload latency " + uploadLatency +
" should be greater than zero now that I've just uploaded a file.",
uploadLatency > 0);
// Read the file
InputStream inputStream = fs.open(filePath);
InputStream inputStream = getFileSystem().open(filePath);
int count = 0;
while (inputStream.read() >= 0) {
count++;
@ -308,10 +300,10 @@ public class TestAzureFileSystemInstrumentation {
long totalBytesRead = AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE, totalBytesRead);
long downloadRate = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(), WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
LOG.info("Download rate: " + downloadRate + " bytes/second.");
long downloadLatency = AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),
WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
LOG.info("Download latency: " + downloadLatency);
assertTrue("The download latency " + downloadLatency +
" should be greater than zero now that I've just downloaded a file.",
downloadLatency > 0);
@ -326,13 +318,14 @@ public class TestAzureFileSystemInstrumentation {
// Create an empty file
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
assertTrue(fs.createNewFile(originalPath));
assertTrue(getFileSystem().createNewFile(originalPath));
logOpResponseCount("Creating an empty file", base);
base = assertWebResponsesInRange(base, 2, 20);
assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_CREATED));
// Rename the file
assertTrue(fs.rename(originalPath, destinationPath));
assertTrue(getFileSystem().rename(originalPath, destinationPath));
// Varies: at the time of writing this code it takes 7 requests/responses.
logOpResponseCount("Renaming a file", base);
base = assertWebResponsesInRange(base, 2, 15);
@ -347,7 +340,7 @@ public class TestAzureFileSystemInstrumentation {
Path filePath = new Path("/metricsTest_delete");
// Check existence
assertFalse(fs.exists(filePath));
assertFalse(getFileSystem().exists(filePath));
// At the time of writing this code it takes 2 requests/responses to
// check existence, which seems excessive, plus initial request for
// container check.
@ -355,17 +348,17 @@ public class TestAzureFileSystemInstrumentation {
base = assertWebResponsesInRange(base, 1, 3);
// Create an empty file
assertTrue(fs.createNewFile(filePath));
assertTrue(getFileSystem().createNewFile(filePath));
base = getCurrentWebResponses();
// Check existence again
assertTrue(fs.exists(filePath));
assertTrue(getFileSystem().exists(filePath));
logOpResponseCount("Checking file existence for existent file", base);
base = assertWebResponsesInRange(base, 1, 2);
// Delete the file
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_FILES_DELETED));
assertTrue(fs.delete(filePath, false));
assertTrue(getFileSystem().delete(filePath, false));
// At the time of writing this code it takes 4 requests/responses to
// delete, which seems excessive. Check for range 1-4 for now.
logOpResponseCount("Deleting a file", base);
@ -384,15 +377,16 @@ public class TestAzureFileSystemInstrumentation {
Path destDirName = new Path("/metricsTestDirectory_RenameFinal");
// Create an empty directory
assertTrue(fs.mkdirs(originalDirName));
assertTrue(getFileSystem().mkdirs(originalDirName));
base = getCurrentWebResponses();
// Create an inner file
assertTrue(fs.createNewFile(innerFileName));
assertTrue(getFileSystem().createNewFile(innerFileName));
base = getCurrentWebResponses();
// Rename the directory
assertTrue(fs.rename(originalDirName, destDirName));
assertTrue(getFileSystem().rename(originalDirName, destDirName));
// At the time of writing this code it takes 11 requests/responses
// to rename the directory with one file. Check for range 1-20 for now.
logOpResponseCount("Renaming a directory", base);
@ -401,6 +395,19 @@ public class TestAzureFileSystemInstrumentation {
assertNoErrors();
}
/**
* Recursive discovery of path depth.
* @param path path to measure.
* @return depth, where "/" == 0.
*/
int depth(Path path) {
if (path.isRoot()) {
return 0;
} else {
return 1 + depth(path.getParent());
}
}
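// Sanity expectations for depth(), assuming Hadoop Path semantics where
// getParent() of a top-level path returns the root (illustrative only,
// not part of the patch):
//   depth(new Path("/"))      == 0
//   depth(new Path("/a"))     == 1
//   depth(new Path("/a/b/c")) == 3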
@Test
public void testClientErrorMetrics() throws Exception {
String fileName = "metricsTestFile_ClientError";
@ -410,8 +417,8 @@ public class TestAzureFileSystemInstrumentation {
String leaseID = null;
try {
// Create a file
outputStream = fs.create(filePath);
leaseID = testAccount.acquireShortLease(fileName);
outputStream = getFileSystem().create(filePath);
leaseID = getTestAccount().acquireShortLease(fileName);
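// With the short lease held, the write/close below should be rejected
// by the service (Azure blob lease conflicts typically surface as
// HTTP 412 Precondition Failed), which is what drives the
// client-error counter this test asserts on.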
try {
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
@ -424,15 +431,15 @@ public class TestAzureFileSystemInstrumentation {
assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(), WASB_SERVER_ERRORS));
} finally {
if(leaseID != null){
testAccount.releaseLease(leaseID, fileName);
getTestAccount().releaseLease(leaseID, fileName);
}
IOUtils.closeStream(outputStream);
}
}
private void logOpResponseCount(String opName, long base) {
System.out.println(opName + " took " + (getCurrentWebResponses() - base) +
" web responses to complete.");
LOG.info("{} took {} web responses to complete.",
opName, getCurrentWebResponses() - base);
}
/**
@ -448,7 +455,7 @@ public class TestAzureFileSystemInstrumentation {
* Gets the current value of the wasb_web_responses counter.
*/
private long getCurrentWebResponses() {
return AzureMetricsTestUtil.getCurrentWebResponses(getInstrumentation());
}
/**
@ -496,7 +503,7 @@ public class TestAzureFileSystemInstrumentation {
}
private AzureFileSystemInstrumentation getInstrumentation() {
return ((NativeAzureFileSystem)fs).getInstrumentation();
return getFileSystem().getInstrumentation();
}
/**