@InterfaceAudience.Public @InterfaceStability.Evolving public class HdfsAdmin extends Object
Modifier and Type | Field and Description |
---|---|
static FsPermission | TRASH_PERMISSION |
Constructor and Description |
---|
HdfsAdmin(URI uri, Configuration conf) - Create a new HdfsAdmin client. |
Modifier and Type | Method and Description |
---|---|
long | addCacheDirective(CacheDirectiveInfo info, EnumSet<CacheFlag> flags) - Add a new CacheDirectiveInfo. |
void | addCachePool(CachePoolInfo info) - Add a cache pool. |
org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse[] | addErasureCodingPolicies(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy[] policies) - Add Erasure coding policies to HDFS. |
void | allowSnapshot(Path path) - Allow snapshot on a directory. |
void | clearQuota(Path src) - Clear the namespace quota (count of files, directories, and symlinks) for a directory. |
void | clearQuotaByStorageType(Path src, StorageType type) - Clear the space quota by storage type for a directory. |
void | clearSpaceQuota(Path src) - Clear the storage space quota (size of files) for a directory. |
void | createEncryptionZone(Path path, String keyName) - Deprecated. |
void | createEncryptionZone(Path path, String keyName, EnumSet<CreateEncryptionZoneFlag> flags) - Create an encryption zone rooted at an empty existing directory, using the specified encryption key. |
void | disableErasureCodingPolicy(String ecPolicyName) - Disable erasure coding policy. |
void | disallowSnapshot(Path path) - Disallow snapshot on a directory. |
void | enableErasureCodingPolicy(String ecPolicyName) - Enable erasure coding policy. |
Collection<? extends BlockStoragePolicySpi> | getAllStoragePolicies() - Retrieve all the storage policies supported by HDFS file system. |
EncryptionZone | getEncryptionZoneForPath(Path path) - Get the path of the encryption zone for a given file or directory. |
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo[] | getErasureCodingPolicies() - Get the Erasure coding policies supported. |
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy | getErasureCodingPolicy(Path path) - Get the erasure coding policy information for the specified path. |
org.apache.hadoop.fs.FileEncryptionInfo | getFileEncryptionInfo(Path path) - Returns the FileEncryptionInfo on the HdfsFileStatus for the given path. |
DFSInotifyEventInputStream | getInotifyEventStream() - Exposes a stream of namesystem events. |
DFSInotifyEventInputStream | getInotifyEventStream(long lastReadTxid) - A version of getInotifyEventStream() meant for advanced users who are aware of HDFS edits up to lastReadTxid (e.g. because they have access to an FSImage inclusive of lastReadTxid). |
KeyProvider | getKeyProvider() - Get KeyProvider if present. |
BlockStoragePolicySpi | getStoragePolicy(Path src) - Query the effective storage policy ID for the given file or directory. |
org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> | listCacheDirectives(CacheDirectiveInfo filter) - List cache directives. |
org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> | listCachePools() - List all cache pools. |
org.apache.hadoop.fs.RemoteIterator<EncryptionZone> | listEncryptionZones() - Returns a RemoteIterator which can be used to list the encryption zones in HDFS. |
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> | listOpenFiles() - Deprecated. |
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> | listOpenFiles(EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes) - Deprecated. |
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> | listOpenFiles(EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes, String path) |
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus> | listReencryptionStatus() - Returns a RemoteIterator which can be used to list all re-encryption information. |
void | modifyCacheDirective(CacheDirectiveInfo info, EnumSet<CacheFlag> flags) - Modify a CacheDirective. |
void | modifyCachePool(CachePoolInfo info) - Modify an existing cache pool. |
void | provisionEncryptionZoneTrash(Path path) - Provision a trash directory for a given encryption zone. |
Path | provisionSnapshotTrash(Path path) - Provision a trash directory for a given snapshottable directory. |
void | reencryptEncryptionZone(Path zone, org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction action) - Performs re-encryption action for a given encryption zone. |
void | removeCacheDirective(long id) - Remove a CacheDirective. |
void | removeCachePool(String poolName) - Remove a cache pool. |
void | removeErasureCodingPolicy(String ecPolicyName) - Remove erasure coding policy. |
void | satisfyStoragePolicy(Path path) - Set the source path to the specified storage policy. |
void | setErasureCodingPolicy(Path path, String ecPolicyName) - Set the source path to the specified erasure coding policy. |
void | setQuota(Path src, long quota) - Set the namespace quota (count of files, directories, and symlinks) for a directory. |
void | setQuotaByStorageType(Path src, StorageType type, long quota) - Set the quota by storage type for a directory. |
void | setSpaceQuota(Path src, long spaceQuota) - Set the storage space quota (size of files) for a directory. |
void | setStoragePolicy(Path src, String policyName) - Set the source path to the specified storage policy. |
void | unsetErasureCodingPolicy(Path path) - Unset erasure coding policy from the directory. |
void | unsetStoragePolicy(Path src) - Unset the storage policy set for a given file or directory. |
public static final FsPermission TRASH_PERMISSION
public HdfsAdmin(URI uri, Configuration conf) throws IOException

Create a new HdfsAdmin client.

Parameters:
  uri - the unique URI of the HDFS file system to administer
  conf - configuration

Throws:
  IOException - in the event the file system could not be created
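For orientation, a minimal construction sketch. The URI "hdfs://namenode:8020" is a placeholder for your cluster's fs.defaultFS, and many of the methods below additionally require HDFS superuser privileges; later examples assume this `admin` instance.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class HdfsAdminExample {
  public static void main(String[] args) throws Exception {
    // Picks up core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; must match the cluster being administered.
    HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://namenode:8020"), conf);
    // Hand `admin` to the sketches shown throughout this page.
  }
}
```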
public void setQuota(Path src, long quota) throws IOException

Set the namespace quota (count of files, directories, and symlinks) for a directory.

Parameters:
  src - the path to set the quota for
  quota - the value to set for the quota

Throws:
  IOException - in the event of error

public void clearQuota(Path src) throws IOException

Clear the namespace quota (count of files, directories, and symlinks) for a directory.

Parameters:
  src - the path to clear the quota of

Throws:
  IOException - in the event of error

public void setSpaceQuota(Path src, long spaceQuota) throws IOException

Set the storage space quota (size of files) for a directory.

Parameters:
  src - the path to set the space quota of
  spaceQuota - the value to set for the space quota

Throws:
  IOException - in the event of error

public void clearSpaceQuota(Path src) throws IOException

Clear the storage space quota (size of files) for a directory.

Parameters:
  src - the path to clear the space quota of

Throws:
  IOException - in the event of error

public void setQuotaByStorageType(Path src, StorageType type, long quota) throws IOException

Set the quota by storage type for a directory.

Parameters:
  src - the target directory to set the quota by storage type
  type - the storage type to set for quota by storage type
  quota - the value to set for quota by storage type

Throws:
  IOException - in the event of error

public void clearQuotaByStorageType(Path src, StorageType type) throws IOException

Clear the space quota by storage type for a directory.

Parameters:
  src - the target directory to clear the quota by storage type
  type - the storage type to clear for quota by storage type

Throws:
  IOException - in the event of error
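A short sketch exercising the quota methods above, assuming the `admin` client from the constructor example; the directory and the limits are hypothetical. Note that the space quota is charged in raw bytes, so block replication counts against it.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

class QuotaExample {
  static void run(HdfsAdmin admin) throws IOException {
    Path dir = new Path("/user/alice/project"); // hypothetical directory
    admin.setQuota(dir, 100_000L);              // at most 100,000 names under dir
    admin.setSpaceQuota(dir, 30L << 30);        // 30 GiB raw, replication included
    admin.setQuotaByStorageType(dir, StorageType.SSD, 5L << 30); // cap SSD usage

    // Quotas are cleared independently of one another.
    admin.clearQuotaByStorageType(dir, StorageType.SSD);
    admin.clearSpaceQuota(dir);
    admin.clearQuota(dir);
  }
}
```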
public void allowSnapshot(Path path) throws IOException

Allow snapshot on a directory.

Parameters:
  path - The path of the directory where snapshots will be taken.

Throws:
  IOException

public Path provisionSnapshotTrash(Path path) throws IOException

Provision a trash directory for a given snapshottable directory.

Parameters:
  path - the root of the snapshottable directory

Throws:
  IOException - if the trash directory can not be created.

public void disallowSnapshot(Path path) throws IOException

Disallow snapshot on a directory.

Parameters:
  path - The path of the snapshottable directory.

Throws:
  IOException
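The snapshot administration calls combine as in the sketch below; the path is hypothetical, and note that disallowSnapshot fails while snapshots still exist under the directory.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

class SnapshotAdminExample {
  static void run(HdfsAdmin admin) throws IOException {
    Path dir = new Path("/data/warehouse"); // hypothetical directory
    admin.allowSnapshot(dir);               // mark the directory snapshottable
    // Optionally provision a trash directory rooted inside the
    // snapshottable tree, returned as a Path.
    Path trash = admin.provisionSnapshotTrash(dir);
    // Later, once no snapshots remain under dir:
    admin.disallowSnapshot(dir);
  }
}
```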
public long addCacheDirective(CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException

Add a new CacheDirectiveInfo.

Parameters:
  info - Information about a directive to add.
  flags - CacheFlags to use for this operation.

Throws:
  IOException - if the directive could not be added

public void modifyCacheDirective(CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException

Modify a CacheDirective.

Parameters:
  info - Information about the directive to modify. You must set the ID to indicate which CacheDirective you want to modify.
  flags - CacheFlags to use for this operation.

Throws:
  IOException - if the directive could not be modified

public void removeCacheDirective(long id) throws IOException

Remove a CacheDirective.

Parameters:
  id - identifier of the CacheDirectiveInfo to remove

Throws:
  IOException - if the directive could not be removed

public org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws IOException

List cache directives.

Parameters:
  filter - Filter parameters to use when listing the directives, null to list all directives visible to us.

Throws:
  IOException
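A hedged sketch of the directive lifecycle, assuming the `admin` client from above and a pre-existing pool named "hot-pool" (created in the pool example that follows); the dataset path and cached replication factor are illustrative.

```java
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

class CacheDirectiveExample {
  static void run(HdfsAdmin admin) throws IOException {
    // Pin a hypothetical hot dataset into the pool "hot-pool".
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/datasets/hot"))
        .setPool("hot-pool")
        .setReplication((short) 2) // cache two replicas of each block
        .build();
    long id = admin.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));

    // A null filter lists every directive visible to the caller.
    RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
    while (it.hasNext()) {
      System.out.println(it.next().getInfo());
    }
    admin.removeCacheDirective(id);
  }
}
```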
public void addCachePool(CachePoolInfo info) throws IOException

Add a cache pool.

Parameters:
  info - The request to add a cache pool.

Throws:
  IOException - If the request could not be completed.

public void modifyCachePool(CachePoolInfo info) throws IOException

Modify an existing cache pool.

Parameters:
  info - The request to modify a cache pool.

Throws:
  IOException - If the request could not be completed.

public void removeCachePool(String poolName) throws IOException

Remove a cache pool.

Parameters:
  poolName - Name of the cache pool to remove.

Throws:
  IOException - if the cache pool did not exist, or could not be removed.

public org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> listCachePools() throws IOException

List all cache pools.

Throws:
  IOException - If there was an error listing cache pools.

public KeyProvider getKeyProvider() throws IOException

Get KeyProvider if present.

Throws:
  IOException - on RPC exception to the NN.
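The pool-management side of the previous sketch; the pool name and 1 GiB byte limit are illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

class CachePoolExample {
  static void run(HdfsAdmin admin) throws IOException {
    // Create the pool referenced by the directive example above.
    CachePoolInfo pool = new CachePoolInfo("hot-pool")
        .setLimit(1L << 30); // max bytes cached on behalf of this pool
    admin.addCachePool(pool);

    RemoteIterator<CachePoolEntry> pools = admin.listCachePools();
    while (pools.hasNext()) {
      System.out.println(pools.next().getInfo().getPoolName());
    }
    admin.removeCachePool("hot-pool");
  }
}
```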
@Deprecated
public void createEncryptionZone(Path path, String keyName) throws IOException, AccessControlException, FileNotFoundException

Deprecated.

Parameters:
  path - The path of the root of the encryption zone. Must refer to an empty, existing directory.
  keyName - Name of key available at the KeyProvider.

Throws:
  IOException - if there was a general IO exception
  AccessControlException - if the caller does not have access to path
  FileNotFoundException - if the path does not exist
public void createEncryptionZone(Path path, String keyName, EnumSet<CreateEncryptionZoneFlag> flags) throws IOException, AccessControlException, FileNotFoundException, HadoopIllegalArgumentException

Create an encryption zone rooted at an empty existing directory, using the specified encryption key. The behavior of the encryption zone can be modified using the given CreateEncryptionZoneFlag flags.

Parameters:
  path - The path of the root of the encryption zone. Must refer to an empty, existing directory.
  keyName - Name of key available at the KeyProvider.
  flags - flags for this operation.

Throws:
  IOException - if there was a general IO exception
  AccessControlException - if the caller does not have access to path
  FileNotFoundException - if the path does not exist
  HadoopIllegalArgumentException - if the flags are invalid
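A sketch of creating a zone with trash provisioning. The path is hypothetical, and the key "myKey" must already exist at the cluster's KeyProvider (e.g. created via the `hadoop key create` CLI).

```java
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

class EncryptionZoneExample {
  static void run(HdfsAdmin admin) throws IOException {
    // The directory must be empty and already exist; the key name is an
    // assumption and must be present at the KeyProvider beforehand.
    admin.createEncryptionZone(new Path("/secure"), "myKey",
        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
  }
}
```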
public void provisionEncryptionZoneTrash(Path path) throws IOException

Provision a trash directory for a given encryption zone.

Parameters:
  path - the root of the encryption zone

Throws:
  IOException - if the trash directory can not be created.

public EncryptionZone getEncryptionZoneForPath(Path path) throws IOException, AccessControlException

Get the path of the encryption zone for a given file or directory.

Parameters:
  path - The path to get the ez for.

Throws:
  IOException - if there was a general IO exception
  AccessControlException - if the caller does not have access to path

public org.apache.hadoop.fs.RemoteIterator<EncryptionZone> listEncryptionZones() throws IOException

Returns a RemoteIterator which can be used to list the encryption zones in HDFS. Since the list is fetched in batches, it does not represent a consistent snapshot of the entire list of encryption zones. This method can only be called by HDFS superusers.

Throws:
  IOException
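Enumerating zones with the returned RemoteIterator (superuser only, per the note above):

```java
import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

class ListEncryptionZonesExample {
  static void run(HdfsAdmin admin) throws IOException {
    RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
    while (zones.hasNext()) {
      EncryptionZone zone = zones.next();
      // Each entry carries the zone root and its encryption key name.
      System.out.println(zone.getPath() + " -> key " + zone.getKeyName());
    }
  }
}
```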
public void reencryptEncryptionZone(Path zone, org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction action) throws IOException
zone
- the root of the encryption zoneaction
- the re-encrypt actionIOException
- If any error occurs when handling re-encrypt action.public org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus> listReencryptionStatus() throws IOException
Since the list is fetched in batches, it does not represent a consistent snapshot of the entire list of encryption zones.
This method can only be called by HDFS superusers.
IOException
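A sketch of driving re-encryption after a key roll and then polling status; the ZoneReencryptionStatus accessors used here (getZoneName, getState) are assumptions about the protocol class, not confirmed by this page.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;

class ReencryptExample {
  static void run(HdfsAdmin admin) throws IOException {
    // Start re-encrypting /secure, e.g. after rolling its key at the
    // KeyProvider. HdfsConstants.ReencryptAction also offers CANCEL.
    admin.reencryptEncryptionZone(new Path("/secure"),
        HdfsConstants.ReencryptAction.START);

    // Superuser-only progress listing.
    RemoteIterator<ZoneReencryptionStatus> it = admin.listReencryptionStatus();
    while (it.hasNext()) {
      ZoneReencryptionStatus status = it.next();
      System.out.println(status.getZoneName() + ": " + status.getState());
    }
  }
}
```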
public org.apache.hadoop.fs.FileEncryptionInfo getFileEncryptionInfo(Path path) throws IOException

Returns the FileEncryptionInfo on the HdfsFileStatus for the given path.

Throws:
  FileNotFoundException - if the path does not exist.
  AccessControlException - if no execute permission on parent path.
  IOException
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException

Exposes a stream of namesystem events. See DFSInotifyEventInputStream for information on stream usage. See Event for information on the available events.

Inotify users may want to tune the following HDFS parameters to ensure that enough extra HDFS edits are saved to support inotify clients that fall behind the current state of the namespace while reading events. The default parameter values should generally be reasonable. If edits are deleted before their corresponding events can be read, clients will see a MissingEventsException on DFSInotifyEventInputStream method calls.

It should generally be sufficient to tune these parameters:

dfs.namenode.num.extra.edits.retained
dfs.namenode.max.extra.edits.segments.retained

Parameters that affect the number of created segments and the number of edits that are considered necessary, i.e. do not count towards the dfs.namenode.num.extra.edits.retained quota:

dfs.namenode.checkpoint.period
dfs.namenode.checkpoint.txns
dfs.namenode.num.checkpoints.retained
dfs.ha.log-roll.period

It is recommended that local journaling be configured (dfs.namenode.edits.dir) for inotify (in addition to a shared journal) so that edit transfers from the shared journal can be avoided.

Throws:
  IOException - If there was an error obtaining the stream.

public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) throws IOException

A version of getInotifyEventStream() meant for advanced users who are aware of HDFS edits up to lastReadTxid (e.g. because they have access to an FSImage inclusive of lastReadTxid) and only want to read events after this point.

Throws:
  IOException
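A minimal tailing loop over the no-argument stream, assuming the `admin` client from above. take() blocks until a batch arrives and may throw MissingEventsException if the reader falls too far behind (see the tuning notes above).

```java
import java.io.IOException;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;

class InotifyExample {
  static void run(HdfsAdmin admin)
      throws IOException, InterruptedException, MissingEventsException {
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    while (true) {
      EventBatch batch = stream.take(); // blocks until events are available
      for (Event event : batch.getEvents()) {
        if (event.getEventType() == Event.EventType.CREATE) {
          Event.CreateEvent create = (Event.CreateEvent) event;
          System.out.println("created: " + create.getPath());
        }
      }
    }
  }
}
```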
public void setStoragePolicy(Path src, String policyName) throws IOException

Set the source path to the specified storage policy.

Parameters:
  src - The source path referring to either a directory or a file.
  policyName - The name of the storage policy.

Throws:
  IOException

public void unsetStoragePolicy(Path src) throws IOException

Unset the storage policy set for a given file or directory.

Parameters:
  src - file or directory path.

Throws:
  IOException

public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException

Query the effective storage policy ID for the given file or directory.

Parameters:
  src - file or directory path.

Throws:
  IOException

public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies() throws IOException

Retrieve all the storage policies supported by HDFS file system.

Throws:
  IOException
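Putting the storage-policy calls together. The path is hypothetical, "COLD" is one of the built-in policy names (alongside HOT, WARM, ALL_SSD, and others), and note that setting a policy does not by itself relocate existing replicas, which is what satisfyStoragePolicy (documented below) is for.

```java
import java.io.IOException;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

class StoragePolicyExample {
  static void run(HdfsAdmin admin) throws IOException {
    Path logs = new Path("/archive/logs"); // hypothetical directory
    admin.setStoragePolicy(logs, "COLD");

    BlockStoragePolicySpi policy = admin.getStoragePolicy(logs);
    System.out.println("effective policy: " + policy.getName());

    for (BlockStoragePolicySpi p : admin.getAllStoragePolicies()) {
      System.out.println("available: " + p.getName());
    }

    // Ask HDFS to move existing replicas in line with the new policy.
    admin.satisfyStoragePolicy(logs);
  }
}
```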
public void setErasureCodingPolicy(Path path, String ecPolicyName) throws IOException
path
- The source path referring to a directory.ecPolicyName
- The erasure coding policy name for the directory.IOException
HadoopIllegalArgumentException
- if the specified EC policy is not
enabled on the clusterpublic org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy getErasureCodingPolicy(Path path) throws IOException
path
- IOException
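A sketch assuming the built-in RS-6-3-1024k Reed-Solomon policy; the directory is hypothetical, and the policy must already be enabled on the cluster (see enableErasureCodingPolicy below), otherwise HadoopIllegalArgumentException is thrown per the Javadoc above.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

class ErasureCodingExample {
  static void run(HdfsAdmin admin) throws IOException {
    Path coldData = new Path("/archive/parquet"); // hypothetical directory
    admin.setErasureCodingPolicy(coldData, "RS-6-3-1024k");

    // Read back the effective policy for the directory.
    ErasureCodingPolicy policy = admin.getErasureCodingPolicy(coldData);
    System.out.println(policy.getName() + " cellSize=" + policy.getCellSize());
  }
}
```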
public void satisfyStoragePolicy(Path path) throws IOException
path
- The source path referring to either a directory or a file.IOException
public org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo[] getErasureCodingPolicies() throws IOException

Get the Erasure coding policies supported.

Throws:
  IOException

public void unsetErasureCodingPolicy(Path path) throws IOException

Unset erasure coding policy from the directory.

Parameters:
  path - The source path referring to a directory.

Throws:
  IOException

public org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse[] addErasureCodingPolicies(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy[] policies) throws IOException

Add Erasure coding policies to HDFS.

Parameters:
  policies - The user defined ec policy list to add.

Throws:
  IOException

public void removeErasureCodingPolicy(String ecPolicyName) throws IOException

Remove erasure coding policy.

Parameters:
  ecPolicyName - The name of the policy to be removed.

Throws:
  IOException

public void enableErasureCodingPolicy(String ecPolicyName) throws IOException

Enable erasure coding policy.

Parameters:
  ecPolicyName - The name of the policy to be enabled.

Throws:
  IOException

public void disableErasureCodingPolicy(String ecPolicyName) throws IOException

Disable erasure coding policy.

Parameters:
  ecPolicyName - The name of the policy to be disabled.

Throws:
  IOException
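Enabling, listing, and disabling policies; RS-3-2-1024k is a built-in policy name, and the ErasureCodingPolicyInfo.getState() accessor is an assumption about the protocol class, not confirmed by this page.

```java
import java.io.IOException;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;

class EcPolicyManagementExample {
  static void run(HdfsAdmin admin) throws IOException {
    // Enable a built-in policy so directories can start using it.
    admin.enableErasureCodingPolicy("RS-3-2-1024k");

    // List every policy the cluster knows about and its state.
    for (ErasureCodingPolicyInfo info : admin.getErasureCodingPolicies()) {
      System.out.println(info.getPolicy().getName()
          + " state=" + info.getState()); // assumed accessor
    }

    admin.disableErasureCodingPolicy("RS-3-2-1024k");
  }
}
```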
@Deprecated
public org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles() throws IOException

Deprecated. Since the list is fetched in batches, it does not represent a consistent snapshot of all open files. This method can only be called by HDFS superusers.

Throws:
  IOException

@Deprecated
public org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles(EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes) throws IOException

Deprecated.

Throws:
  IOException

public org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles(EnumSet<org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType> openFilesTypes, String path) throws IOException

Throws:
  IOException
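A sketch of the non-deprecated overload, assuming the `admin` client from above; superuser privileges apply here as well, and the "/" argument scopes the listing to the whole namespace.

```java
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;

class ListOpenFilesExample {
  static void run(HdfsAdmin admin) throws IOException {
    RemoteIterator<OpenFileEntry> it = admin.listOpenFiles(
        EnumSet.of(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES), "/");
    while (it.hasNext()) {
      OpenFileEntry entry = it.next();
      System.out.println(entry.getFilePath()
          + " held by " + entry.getClientName());
    }
  }
}
```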
Copyright © 2023 Apache Software Foundation. All rights reserved.