HDFS-10914. Move remnants of oah.hdfs.client to hadoop-hdfs-client.

Author: Andrew Wang
Date: 2016-09-28 16:00:51 -07:00
parent 5f34402ada
commit 92e5e91598

4 changed files with 30 additions and 32 deletions

HdfsAdmin.java

@@ -45,29 +45,28 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
  * applications against HDFS should prefer this interface to directly accessing
  * functionality in DistributedFileSystem or DFSClient.
  *
- * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
+ * Note that this is distinct from the similarly-named DFSAdmin, which
  * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
  * commands.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HdfsAdmin {
 
   private DistributedFileSystem dfs;
   private static final FsPermission TRASH_PERMISSION = new FsPermission(
       FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
 
   /**
    * Create a new HdfsAdmin client.
    *
    * @param uri the unique URI of the HDFS file system to administer
    * @param conf configuration
    * @throws IOException in the event the file system could not be created
@@ -80,11 +79,11 @@ public class HdfsAdmin {
       dfs = (DistributedFileSystem)fs;
     }
   }
 
   /**
    * Set the namespace quota (count of files, directories, and sym links) for a
    * directory.
    *
    * @param src the path to set the quota for
    * @param quota the value to set for the quota
    * @throws IOException in the event of error
@@ -92,22 +91,22 @@ public class HdfsAdmin {
   public void setQuota(Path src, long quota) throws IOException {
     dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
   }
 
   /**
    * Clear the namespace quota (count of files, directories and sym links) for a
    * directory.
    *
    * @param src the path to clear the quota of
    * @throws IOException in the event of error
    */
   public void clearQuota(Path src) throws IOException {
     dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
   }
 
   /**
    * Set the storage space quota (size of files) for a directory. Note that
    * directories and sym links do not occupy storage space.
    *
    * @param src the path to set the space quota of
    * @param spaceQuota the value to set for the space quota
    * @throws IOException in the event of error
@@ -115,11 +114,11 @@ public class HdfsAdmin {
   public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
     dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
   }
 
   /**
    * Clear the storage space quota (size of files) for a directory. Note that
    * directories and sym links do not occupy storage space.
    *
    * @param src the path to clear the space quota of
    * @throws IOException in the event of error
    */
@@ -152,7 +151,7 @@ public class HdfsAdmin {
   public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
     dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
   }
 
   /**
    * Allow snapshot on a directory.
    * @param path The path of the directory where snapshots will be taken.
@@ -160,7 +159,7 @@ public class HdfsAdmin {
   public void allowSnapshot(Path path) throws IOException {
     dfs.allowSnapshot(path);
   }
 
   /**
    * Disallow snapshot on a directory.
    * @param path The path of the snapshottable directory.
@@ -171,7 +170,7 @@ public class HdfsAdmin {
 
   /**
    * Add a new CacheDirectiveInfo.
    *
    * @param info Information about a directive to add.
    * @param flags {@link CacheFlag}s to use for this operation.
    * @return the ID of the directive that was created.
@@ -181,10 +180,10 @@ public class HdfsAdmin {
       EnumSet<CacheFlag> flags) throws IOException {
     return dfs.addCacheDirective(info, flags);
   }
 
   /**
    * Modify a CacheDirective.
    *
    * @param info Information about the directive to modify. You must set the ID
    *             to indicate which CacheDirective you want to modify.
    * @param flags {@link CacheFlag}s to use for this operation.
@@ -197,7 +196,7 @@ public class HdfsAdmin {
 
   /**
    * Remove a CacheDirective.
    *
    * @param id identifier of the CacheDirectiveInfo to remove
    * @throws IOException if the directive could not be removed
    */
@@ -208,7 +207,7 @@ public class HdfsAdmin {
 
   /**
    * List cache directives. Incrementally fetches results from the server.
    *
    * @param filter Filter parameters to use when listing the directives, null to
    *               list all directives visible to us.
    * @return A RemoteIterator which returns CacheDirectiveInfo objects.
@@ -223,7 +222,7 @@ public class HdfsAdmin {
    *
    * @param info
    *          The request to add a cache pool.
    * @throws IOException
    *          If the request could not be completed.
    */
   public void addCachePool(CachePoolInfo info) throws IOException {
@@ -235,19 +234,19 @@ public class HdfsAdmin {
    *
    * @param info
    *          The request to modify a cache pool.
    * @throws IOException
    *          If the request could not be completed.
    */
   public void modifyCachePool(CachePoolInfo info) throws IOException {
     dfs.modifyCachePool(info);
   }
 
   /**
    * Remove a cache pool.
    *
    * @param poolName
    *          Name of the cache pool to remove.
    * @throws IOException
    *          if the cache pool did not exist, or could not be removed.
    */
   public void removeCachePool(String poolName) throws IOException {
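
For context, a minimal usage sketch of the HdfsAdmin API this file exposes. Every method called here appears in the diff above; the namenode URI, port, and paths are illustrative placeholders, not part of the commit.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class HdfsAdminExample {
  public static void main(String[] args) throws Exception {
    // Placeholder namenode URI; substitute your cluster's address.
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
    Path dir = new Path("/user/example");

    admin.setQuota(dir, 10000);          // at most 10,000 names (files, dirs, symlinks) under dir
    admin.setSpaceQuota(dir, 10L << 30); // cap total file space at 10 GiB
    admin.allowSnapshot(dir);            // make dir snapshottable

    admin.clearQuota(dir);               // later, remove the namespace quota again
  }
}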

HdfsUtils.java

@@ -20,8 +20,7 @@ package org.apache.hadoop.hdfs.client;
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -30,7 +29,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The public utility API for HDFS.
@@ -38,7 +38,7 @@ import org.apache.hadoop.io.IOUtils;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HdfsUtils {
-  private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
 
   /**
    * Is the HDFS healthy?
@@ -54,7 +54,7 @@ public class HdfsUtils {
       throw new IllegalArgumentException("The scheme is not "
           + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
     }
 
     final Configuration conf = new Configuration();
     //disable FileSystem cache
     conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
@@ -80,7 +80,7 @@ public class HdfsUtils {
       }
       return false;
     } finally {
-      IOUtils.cleanup(LOG, fs);
+      IOUtils.closeQuietly(fs);
    }
  }
}
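
Two things change in HdfsUtils: logging moves from commons-logging to slf4j, and because Hadoop's IOUtils.cleanup(Log, Closeable...) expects a commons-logging Log, the finally block switches to commons-io's IOUtils.closeQuietly. Below is a minimal sketch of calling the public health-check method; the URI is a placeholder, and the class name is invented for illustration.

import java.net.URI;

import org.apache.hadoop.hdfs.client.HdfsUtils;

public class HealthCheckExample {
  public static void main(String[] args) {
    // Placeholder URI; isHealthy() requires the hdfs:// scheme,
    // as the scheme check in the diff above shows.
    URI uri = URI.create("hdfs://namenode:8020");
    boolean healthy = HdfsUtils.isHealthy(uri);
    System.out.println(uri + " is healthy: " + healthy);
  }
}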