diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
index 4c3a36929f8..ed510f67cad 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
@@ -36,7 +36,8 @@ public interface KeyProvider {
    * @param conf
    *          Hadoop configuration parameters
    * @return the plaintext storage account key
-   * @throws KeyProviderException
+   * @throws KeyProviderException Thrown if there is a problem instantiating a
+   *           KeyProvider or retrieving a key using a KeyProvider object.
    */
   String getStorageAccountKey(String accountName, Configuration conf)
       throws KeyProviderException;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 161da519618..3b6b584acdd 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -268,7 +268,8 @@ public class NativeAzureFileSystem extends FileSystem {
      *     "innerFile2"
      *   ]
      * } }
-     * @throws IOException
+     * @param fs file system to which the file is written.
+     * @throws IOException Thrown when the file cannot be written.
      */
     public void writeFile(FileSystem fs) throws IOException {
       Path path = getRenamePendingFilePath();
@@ -292,6 +293,8 @@ public class NativeAzureFileSystem extends FileSystem {
     /**
      * Return the contents of the JSON file to represent the operations
      * to be performed for a folder rename.
+     *
+     * @return JSON string which represents the rename operation.
      */
     public String makeRenamePendingFileContents() {
       SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
@@ -418,7 +421,7 @@ public class NativeAzureFileSystem extends FileSystem {
      * when everything is working normally. See redo() for the alternate
      * execution path for the case where we're recovering from a folder rename
      * failure.
-     * @throws IOException
+     * @throws IOException Thrown when the rename fails.
      */
     public void execute() throws IOException {
@@ -472,7 +475,8 @@ public class NativeAzureFileSystem extends FileSystem {
     }
     /** Clean up after execution of rename.
-     * @throws IOException */
+     * @throws IOException Thrown when cleanup fails.
+     * */
     public void cleanup() throws IOException {
       if (fs.getStoreInterface().isAtomicRenameKey(srcKey)) {
@@ -496,7 +500,7 @@ public class NativeAzureFileSystem extends FileSystem {
      * Recover from a folder rename failure by redoing the intended work,
      * as recorded in the -RenamePending.json file.
      *
-     * @throws IOException
+     * @throws IOException Thrown when the redo fails.
      */
     public void redo() throws IOException {
@@ -1120,6 +1124,7 @@ public class NativeAzureFileSystem extends FileSystem {
   /**
    * Creates a new metrics source name that's unique within this process.
+   * @return metrics source name
    */
   @VisibleForTesting
   public static String newMetricsSourceName() {
@@ -1253,6 +1258,8 @@ public class NativeAzureFileSystem extends FileSystem {
   /**
    * Convert the path to a key. By convention, any leading or trailing slash is
    * removed, except for the special case of a single slash.
+   * @param path path to be converted to a key
+   * @return key string
    */
   @VisibleForTesting
   public String pathToKey(Path path) {
@@ -1307,7 +1314,7 @@ public class NativeAzureFileSystem extends FileSystem {
    * Get the absolute version of the path (fully qualified).
    * This is public for testing purposes.
    *
-   * @param path
+   * @param path path to be converted to an absolute path.
    * @return fully qualified path
    */
   @VisibleForTesting
@@ -1415,6 +1422,8 @@ public class NativeAzureFileSystem extends FileSystem {
   /**
    * Get a self-renewing lease on the specified file.
+   * @param path path of the file on which the lease is to be acquired.
+   * @return the self-renewing lease.
    */
   public SelfRenewingLease acquireLease(Path path) throws AzureException {
     String fullKey = pathToKey(makeAbsolute(path));
@@ -1662,12 +1671,12 @@ public class NativeAzureFileSystem extends FileSystem {
    * modified time is not necessary, it's easier to just skip
    * the modified time update.
    *
-   * @param f
-   * @param recursive
+   * @param f file path to be deleted.
+   * @param recursive whether to delete recursively.
    * @param skipParentFolderLastModifidedTimeUpdate If true, don't update the folder last
    * modified time.
    * @return true if and only if the file is deleted
-   * @throws IOException
+   * @throws IOException Thrown when the file or directory cannot be deleted.
    */
   public boolean delete(Path f, boolean recursive,
       boolean skipParentFolderLastModifidedTimeUpdate) throws IOException {
@@ -2890,7 +2899,7 @@ public class NativeAzureFileSystem extends FileSystem {
    *          The root path to consider.
    * @param destination
    *          The destination path to move any recovered files to.
-   * @throws IOException
+   * @throws IOException Thrown when the files cannot be recovered.
    */
   public void recoverFilesWithDanglingTempData(Path root, Path destination)
       throws IOException {
@@ -2908,7 +2917,7 @@ public class NativeAzureFileSystem extends FileSystem {
    *
    * @param root
    *          The root path to consider.
-   * @throws IOException
+   * @throws IOException Thrown when the deletion fails.
    */
   public void deleteFilesWithDanglingTempData(Path root)
       throws IOException {
@@ -2928,7 +2937,7 @@ public class NativeAzureFileSystem extends FileSystem {
    * Upload data to a random temporary file then do storage side renaming to
    * recover the original key.
    *
-   * @param aKey
+   * @param aKey a key to be encoded.
    * @return Encoded version of the original key.
    */
   private static String encodeKey(String aKey) {
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
index 454a5df8658..611fe1afdf2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
@@ -93,7 +93,8 @@ interface NativeFileSystemStore {
   /**
    * Delete all keys with the given prefix. Used for testing.
    *
-   * @throws IOException
+   * @param prefix prefix of the keys to be deleted.
+   * @throws IOException Exception encountered while deleting keys.
    */
   @VisibleForTesting
   void purge(String prefix) throws IOException;
@@ -101,7 +102,7 @@ interface NativeFileSystemStore {
   /**
    * Diagnostic method to dump state to the console.
    *
-   * @throws IOException
+   * @throws IOException Exception encountered while dumping to console.
    */
   void dump() throws IOException;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorInterface.java
index 8d871eb86c6..3067c1096df 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorInterface.java
@@ -41,7 +41,8 @@ public interface SASKeyGeneratorInterface {
    * @param container
    *          - Container name within the storage account.
    * @return SAS URI for the container.
-   * @throws SASKeyGenerationException
+   * @throws SASKeyGenerationException Exception that gets thrown during
+   *           generation of the SAS key.
    */
   URI getContainerSASUri(String accountName, String container)
       throws SASKeyGenerationException;
@@ -57,7 +58,8 @@ public interface SASKeyGeneratorInterface {
    * @param relativePath
    *          - Relative path within the container
    * @return SAS URI for the relative path blob.
-   * @throws SASKeyGenerationException
+   * @throws SASKeyGenerationException Exception that gets thrown during
+   *           generation of the SAS key.
    */
   URI getRelativeBlobSASUri(String accountName, String container,
       String relativePath) throws SASKeyGenerationException;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 900d7301bf9..76098f3a9c3 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -111,7 +111,7 @@ public class SelfRenewingLease {
   /**
    * Free the lease and stop the keep-alive thread.
-   * @throws StorageException
+   * @throws StorageException Thrown when the lease cannot be freed.
    */
   public void free() throws StorageException {
     AccessCondition accessCondition = AccessCondition.generateEmptyCondition();
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
index f86f392c794..25c9eb42ea1 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
@@ -94,7 +94,8 @@ public final class SendRequestIntercept extends StorageEvent
    *          null
    *          will use the default request options from the associated service
-   *          client ( {@link CloudBlobClient}).
+   *          client ({@link com.microsoft.azure.storage.blob.CloudBlobClient}).
    * @param opContext
    *          An {@link OperationContext} object that represents the context
    *          for the current operation. This object is used to track requests
    *
@@ -207,8 +210,9 @@ abstract class StorageInterface {
   }
   /**
-   * A thin wrapper over the {@link CloudBlobContainer} class that simply
-   * redirects calls to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudBlobContainer} class
+   * that simply redirects calls to the real object except in unit tests.
    */
   @InterfaceAudience.Private
   public abstract static class CloudBlobContainerWrapper {
@@ -608,8 +612,9 @@ abstract class StorageInterface {
   }
   /**
-   * A thin wrapper over the {@link CloudBlockBlob} class that simply redirects calls
-   * to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudBlockBlob} class
+   * that simply redirects calls to the real object except in unit tests.
    */
   public abstract interface CloudBlockBlobWrapper extends CloudBlobWrapper {
@@ -690,8 +695,9 @@ abstract class StorageInterface {
   }
   /**
-   * A thin wrapper over the {@link CloudPageBlob} class that simply redirects calls
-   * to the real object except in unit tests.
+   * A thin wrapper over the
+   * {@link com.microsoft.azure.storage.blob.CloudPageBlob}
+   * class that simply redirects calls to the real object except in unit tests.
    */
   public abstract interface CloudPageBlobWrapper extends CloudBlobWrapper {
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemInstrumentation.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemInstrumentation.java
index a08ad7195bd..6cce271b227 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemInstrumentation.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemInstrumentation.java
@@ -187,6 +187,7 @@ public final class AzureFileSystemInstrumentation implements MetricsSource {
   /**
    * The unique identifier for this file system in the metrics.
+   * @return The unique identifier.
    */
   public UUID getFileSystemInstanceId() {
     return fileSystemInstanceId;
@@ -194,6 +195,7 @@ public final class AzureFileSystemInstrumentation implements MetricsSource {
   /**
    * Get the metrics registry information.
+   * @return The metrics registry information.
    */
   public MetricsInfo getMetricsRegistryInfo() {
     return registry.info();
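
For context, the KeyProvider contract documented in the first hunk can be implemented along these lines. This is only a minimal sketch: the class name SimpleConfigKeyProvider and the fs.azure.account.key.<accountName> configuration lookup are illustrative, and it assumes KeyProviderException accepts a message string.

package org.apache.hadoop.fs.azure;

import org.apache.hadoop.conf.Configuration;

/**
 * Minimal sketch of a KeyProvider implementation. The class name and the
 * configuration property used for the lookup are illustrative only.
 */
public class SimpleConfigKeyProvider implements KeyProvider {

  @Override
  public String getStorageAccountKey(String accountName, Configuration conf)
      throws KeyProviderException {
    // Read the key straight from the configuration; a real provider might
    // decrypt it or fetch it from an external secret store instead.
    String key = conf.get("fs.azure.account.key." + accountName);
    if (key == null) {
      // Assumes KeyProviderException exposes a message-only constructor.
      throw new KeyProviderException(
          "No storage account key configured for " + accountName);
    }
    return key;
  }
}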