<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!-- Generated by the JDiff Javadoc doclet -->
<!-- (http://www.jdiff.org) -->
<!-- on Wed Aug 24 13:54:04 PDT 2016 -->
<api
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
xsi:noNamespaceSchemaLocation='api.xsd'
name="Apache Hadoop HDFS 2.7.2"
jdversion="1.0.9">
<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/common
s-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2 ... -->
<package name="org.apache.hadoop.fs">
<!-- start class org.apache.hadoop.fs.BlockStorageLocation -->
<class name="BlockStorageLocation" extends="org.apache.hadoop.fs.BlockLocation"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockStorageLocation" type="org.apache.hadoop.fs.BlockLocation, org.apache.hadoop.fs.VolumeId[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getVolumeIds" return="org.apache.hadoop.fs.VolumeId[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the list of {@link VolumeId} corresponding to the block's replicas.
@return the list of VolumeId for the block's replicas]]>
</doc>
</method>
<doc>
<![CDATA[Wrapper for {@link BlockLocation} that also adds {@link VolumeId} volume
location information for each replica.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.BlockStorageLocation -->
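<!-- Usage sketch (illustrative, not part of the generated API description;
     imports elided): reading per-replica volume IDs. Assumes an existing
     DistributedFileSystem `dfs`; getFileBlockStorageLocations was the 2.x
     client call that produced BlockStorageLocation objects, and the feature
     may additionally require dfs.datanode.hdfs-blocks-metadata.enabled on
     the datanodes.

       FileStatus stat = dfs.getFileStatus(new Path("/data/file"));
       BlockLocation[] locs = dfs.getFileBlockLocations(stat, 0, stat.getLen());
       // Decorates each BlockLocation with the VolumeIds of its replicas.
       BlockStorageLocation[] volLocs =
           dfs.getFileBlockStorageLocations(Arrays.asList(locs));
       for (BlockStorageLocation loc : volLocs) {
         for (VolumeId id : loc.getVolumeIds()) {
           System.out.println(loc + " replica on volume " + id);
         }
       }
-->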
<!-- start class org.apache.hadoop.fs.CacheFlag -->
<class name="CacheFlag" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.fs.CacheFlag[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.fs.CacheFlag"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Specifies semantics for CacheDirective operations. Multiple flags can
be combined in an EnumSet.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.CacheFlag -->
<!-- start class org.apache.hadoop.fs.HdfsVolumeId -->
<class name="HdfsVolumeId" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.VolumeId"/>
<constructor name="HdfsVolumeId" type="byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="arg0" type="org.apache.hadoop.fs.VolumeId"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[HDFS-specific volume identifier which implements {@link VolumeId}. Can be
used to differentiate between the data directories on a single datanode. This
identifier is only unique on a per-datanode basis.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.HdfsVolumeId -->
<!-- start interface org.apache.hadoop.fs.VolumeId -->
<interface name="VolumeId" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.lang.Comparable"/>
<method name="compareTo" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="arg0" type="org.apache.hadoop.fs.VolumeId"/>
</method>
<method name="hashCode" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
<![CDATA[Opaque interface that identifies a disk location. Implementing classes
should also implement {@link Comparable} and override both equals and hashCode.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.fs.VolumeId -->
</package>
<package name="org.apache.hadoop.hdfs">
<!-- start class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
<class name="DFSInotifyEventInputStream" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="poll" return="org.apache.hadoop.hdfs.inotify.EventBatch"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next batch of events in the stream or null if no new
batches are currently available.
@throws IOException because of network error or edit log
corruption. Also possible if JournalNodes are unresponsive in the
QJM setting (even one unresponsive JournalNode is enough in rare cases),
so catching this exception and retrying at least a few times is
recommended.
@throws MissingEventsException if we cannot return the next batch in the
stream because the data for the events (and possibly some subsequent
events) has been deleted (generally because this stream is a very large
number of transactions behind the current state of the NameNode). It is
safe to continue reading from the stream after this exception is thrown.
The next available batch of events will be returned.]]>
</doc>
</method>
<method name="getTxidsBehindEstimate" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return an estimate of how many transaction IDs behind the NameNode's
current state this stream is. Clients should periodically call this method
and check if its result is steadily increasing, which indicates that they
are falling behind (i.e. transactions are being generated faster than the
client is reading them). If a client falls too far behind events may be
deleted before the client can read them.
<p/>
A return value of -1 indicates that an estimate could not be produced, and
should be ignored. The value returned by this method is only useful
when compared to previous or subsequent returned values.]]>
</doc>
</method>
<method name="poll" return="org.apache.hadoop.hdfs.inotify.EventBatch"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="time" type="long"/>
<param name="tu" type="java.util.concurrent.TimeUnit"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next event batch in the stream, waiting up to the specified
amount of time for a new batch. Returns null if one is not available at the
end of the specified amount of time. The time before the method returns may
exceed the specified amount of time by up to the time required for an RPC
to the NameNode.
@param time number of units of the given TimeUnit to wait
@param tu the desired TimeUnit
@throws IOException see {@link DFSInotifyEventInputStream#poll()}
@throws MissingEventsException
see {@link DFSInotifyEventInputStream#poll()}
@throws InterruptedException if the calling thread is interrupted]]>
</doc>
</method>
<method name="take" return="org.apache.hadoop.hdfs.inotify.EventBatch"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next batch of events in the stream, waiting indefinitely if
a new batch is not immediately available.
@throws IOException see {@link DFSInotifyEventInputStream#poll()}
@throws MissingEventsException see
{@link DFSInotifyEventInputStream#poll()}
@throws InterruptedException if the calling thread is interrupted]]>
</doc>
</method>
<field name="LOG" type="org.slf4j.Logger"
transient="false" volatile="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Stream for reading inotify events. DFSInotifyEventInputStreams should not
be shared among multiple threads.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
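<!-- Usage sketch (illustrative; imports elided): tailing namesystem events
     with poll(), assuming an HdfsAdmin `admin` for the target NameNode.
     IOException and InterruptedException propagate to the caller; the poll()
     doc above recommends retrying on IOException.

       void tailEvents(HdfsAdmin admin)
           throws IOException, InterruptedException {
         DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
         while (true) {
           try {
             // Wait up to one second for the next batch; null means none arrived.
             EventBatch batch = stream.poll(1, TimeUnit.SECONDS);
             if (batch == null) {
               continue;
             }
             for (Event event : batch.getEvents()) {
               System.out.println(batch.getTxid() + ": " + event.getEventType());
             }
           } catch (MissingEventsException e) {
             // Events were purged before we could read them; the stream stays
             // usable and resumes at the next available batch.
             System.err.println("Skipped events: " + e);
           }
         }
       }
-->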
<!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
<class name="UnknownCipherSuiteException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnknownCipherSuiteException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Thrown when an unknown cipher suite is encountered.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
<!-- start class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
<class name="UnknownCryptoProtocolVersionException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnknownCryptoProtocolVersionException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="UnknownCryptoProtocolVersionException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
</class>
<!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
<doc>
<![CDATA[<p>A distributed implementation of {@link
org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
<p>The most important difference is that unlike GFS, Hadoop DFS files
have strictly one writer at any one time. Bytes are always appended
to the end of the writer's stream. There is no notion of "record appends"
or "mutations" that are then checked or reordered. Writers simply emit
a byte stream. That byte stream is guaranteed to be stored in the
order written.</p>]]>
</doc>
</package>
<package name="org.apache.hadoop.hdfs.client">
<!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions -->
<class name="BlockReportOptions" extends="java.lang.Object"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="isIncremental" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Options that can be specified when manually triggering a block report.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsAdmin -->
<class name="HdfsAdmin" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsAdmin" type="java.net.URI, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new HdfsAdmin client.
@param uri the unique URI of the HDFS file system to administer
@param conf configuration
@throws IOException in the event the file system could not be created]]>
</doc>
</constructor>
<method name="setQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="quota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the namespace quota (count of files, directories, and sym links) for a
directory.
@param src the path to set the quota for
@param quota the value to set for the quota
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the namespace quota (count of files, directories and sym links) for a
directory.
@param src the path to clear the quota of
@throws IOException in the event of error]]>
</doc>
</method>
<method name="setSpaceQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="spaceQuota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the storage space quota (size of files) for a directory. Note that
directories and sym links do not occupy storage space.
@param src the path to set the space quota of
@param spaceQuota the value to set for the space quota
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearSpaceQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the storage space quota (size of files) for a directory. Note that
directories and sym links do not occupy storage space.
@param src the path to clear the space quota of
@throws IOException in the event of error]]>
</doc>
</method>
<method name="setQuotaByStorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
<param name="quota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the quota by storage type for a directory. Note that
directories and sym links do not occupy storage type quota.
@param src the target directory to set the quota by storage type
@param type the storage type to set for quota by storage type
@param quota the value to set for quota by storage type
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearQuotaByStorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the space quota by storage type for a directory. Note that
directories and sym links do not occupy storage type quota.
@param src the target directory to clear the quota by storage type
@param type the storage type to clear for quota by storage type
@throws IOException in the event of error]]>
</doc>
</method>
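<!-- Usage sketch (illustrative; imports elided): the quota family of calls
     above. The URI, paths, and sizes are examples only; the enclosing method
     is assumed to declare throws IOException.

       HdfsAdmin admin =
           new HdfsAdmin(URI.create("hdfs://nn:8020"), new Configuration());
       Path dir = new Path("/projects/etl");
       admin.setQuota(dir, 1000000);                       // max 1M names under dir
       admin.setSpaceQuota(dir, 10L * 1024 * 1024 * 1024); // 10 GB of file bytes
       admin.setQuotaByStorageType(dir, StorageType.SSD, 1024L * 1024 * 1024);
       admin.clearQuotaByStorageType(dir, StorageType.SSD);
       admin.clearSpaceQuota(dir);
       admin.clearQuota(dir);
-->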
<method name="allowSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Allow snapshot on a directory.
@param path The path of the directory where snapshots will be taken.]]>
</doc>
</method>
<method name="disallowSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Disallow snapshot on a directory.
@param path The path of the snapshottable directory.]]>
</doc>
</method>
<method name="addCacheDirective" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<param name="flags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Add a new CacheDirectiveInfo.
@param info Information about a directive to add.
@param flags {@link CacheFlag}s to use for this operation.
@return the ID of the directive that was created.
@throws IOException if the directive could not be added]]>
</doc>
</method>
<method name="modifyCacheDirective"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<param name="flags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Modify a CacheDirective.
@param info Information about the directive to modify. You must set the ID
to indicate which CacheDirective you want to modify.
@param flags {@link CacheFlag}s to use for this operation.
@throws IOException if the directive could not be modified]]>
</doc>
</method>
<method name="removeCacheDirective"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Remove a CacheDirective.
@param id identifier of the CacheDirectiveInfo to remove
@throws IOException if the directive could not be removed]]>
</doc>
</method>
<method name="listCacheDirectives" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filter" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[List cache directives. Incrementally fetches results from the server.
@param filter Filter parameters to use when listing the directives, null to
list all directives visible to us.
@return A RemoteIterator which returns CacheDirectiveInfo objects.]]>
</doc>
</method>
<method name="addCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Add a cache pool.
@param info
The request to add a cache pool.
@throws IOException
If the request could not be completed.]]>
</doc>
</method>
<method name="modifyCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Modify an existing cache pool.
@param info
The request to modify a cache pool.
@throws IOException
If the request could not be completed.]]>
</doc>
</method>
<method name="removeCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="poolName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Remove a cache pool.
@param poolName
Name of the cache pool to remove.
@throws IOException
if the cache pool did not exist, or could not be removed.]]>
</doc>
</method>
<method name="listCachePools" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[List all cache pools.
@return A remote iterator from which you can get CachePoolEntry objects.
Requests will be made as needed.
@throws IOException
If there was an error listing cache pools.]]>
</doc>
</method>
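<!-- Usage sketch (illustrative; imports elided): creating a cache pool,
     adding a directive to it, and listing directives, assuming an HdfsAdmin
     `admin`. The CacheDirectiveInfo.Builder nested class is assumed from the
     client API; it does not appear in this diff excerpt.

       admin.addCachePool(new CachePoolInfo("hot-tables"));
       long id = admin.addCacheDirective(
           new CacheDirectiveInfo.Builder()
               .setPath(new Path("/warehouse/dim_dates"))
               .setReplication((short) 2)
               .setPool("hot-tables")
               .build(),
           EnumSet.noneOf(CacheFlag.class));
       RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
       while (it.hasNext()) {
         CacheDirectiveEntry entry = it.next();
         System.out.println(entry.getInfo() + " => " + entry.getStats());
       }
       admin.removeCacheDirective(id);
-->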
<method name="createEncryptionZone"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="keyName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<doc>
<![CDATA[Create an encryption zone rooted at an empty existing directory, using the
specified encryption key. An encryption zone has an associated encryption
key used when reading and writing files within the zone.
@param path The path of the root of the encryption zone. Must refer to
an empty, existing directory.
@param keyName Name of key available at the KeyProvider.
@throws IOException if there was a general IO exception
@throws AccessControlException if the caller does not have access to path
@throws FileNotFoundException if the path does not exist]]>
</doc>
</method>
<method name="getEncryptionZoneForPath" return="org.apache.hadoop.hdfs.protocol.EncryptionZone"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<doc>
<![CDATA[Get the encryption zone for a given file or directory.
@param path The path to get the encryption zone for.
@return The EncryptionZone, or null if the path is not in an encryption zone.
@throws IOException if there was a general IO exception
@throws AccessControlException if the caller does not have access to path
@throws FileNotFoundException if the path does not exist]]>
</doc>
</method>
<method name="listEncryptionZones" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns a RemoteIterator which can be used to list the encryption zones
in HDFS. For large numbers of encryption zones, the iterator will fetch
the list of zones in a number of small batches.
<p/>
Since the list is fetched in batches, it does not represent a
consistent snapshot of the entire list of encryption zones.
<p/>
This method can only be called by HDFS superusers.]]>
</doc>
</method>
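<!-- Usage sketch (illustrative; imports elided): creating and inspecting an
     encryption zone, assuming an HdfsAdmin `admin`, an empty directory
     /secure, and a key named "ezkey1" already provisioned in the cluster's
     KeyProvider.

       Path zoneRoot = new Path("/secure");
       admin.createEncryptionZone(zoneRoot, "ezkey1");
       EncryptionZone zone = admin.getEncryptionZoneForPath(zoneRoot);
       if (zone != null) {
         System.out.println(zone.getPath() + " key=" + zone.getKeyName());
       }
       // Superusers only; the listing is fetched in batches, not a snapshot.
       RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
       while (zones.hasNext()) {
         System.out.println(zones.next());
       }
-->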
<method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Exposes a stream of namesystem events. Only events occurring after the
stream is created are available.
See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
for information on stream usage.
See {@link org.apache.hadoop.hdfs.inotify.Event}
for information on the available events.
<p/>
Inotify users may want to tune the following HDFS parameters to
ensure that enough extra HDFS edits are saved to support inotify clients
that fall behind the current state of the namespace while reading events.
The default parameter values should generally be reasonable. If edits are
deleted before their corresponding events can be read, clients will see a
{@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
{@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
It should generally be sufficient to tune these parameters:
dfs.namenode.num.extra.edits.retained
dfs.namenode.max.extra.edits.segments.retained
Parameters that affect the number of created segments and the number of
edits that are considered necessary (i.e. do not count towards the
dfs.namenode.num.extra.edits.retained quota):
dfs.namenode.checkpoint.period
dfs.namenode.checkpoint.txns
dfs.namenode.num.checkpoints.retained
dfs.ha.log-roll.period
<p/>
It is recommended that local journaling be configured
(dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
so that edit transfers from the shared journal can be avoided.
@throws IOException If there was an error obtaining the stream.]]>
</doc>
</method>
<method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="lastReadTxid" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
users who are aware of HDFS edits up to lastReadTxid (e.g. because they
have access to an FSImage inclusive of lastReadTxid) and only want to read
events after this point.]]>
</doc>
</method>
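<!-- Illustrative hdfs-site.xml fragment for the NameNode-side tuning named
     above; the values shown are the usual defaults, not recommendations.

       <property>
         <name>dfs.namenode.num.extra.edits.retained</name>
         <value>1000000</value>
       </property>
       <property>
         <name>dfs.namenode.max.extra.edits.segments.retained</name>
         <value>10000</value>
       </property>
-->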
<method name="setStoragePolicy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="policyName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the storage policy for the given source path.
@param src The source path referring to either a directory or a file.
@param policyName The name of the storage policy.]]>
</doc>
</method>
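<!-- Usage sketch (illustrative), assuming an HdfsAdmin `admin`; "COLD" is one
     of the built-in 2.7 policies (HOT, WARM, COLD, ALL_SSD, ONE_SSD,
     LAZY_PERSIST).

       admin.setStoragePolicy(new Path("/archive/2015"), "COLD");
-->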
<doc>
<![CDATA[The public API for performing administrative functions on HDFS. Those writing
applications against HDFS should prefer this interface to directly accessing
functionality in DistributedFileSystem or DFSClient.
Note that this is distinct from the similarly-named {@link DFSAdmin}, which
is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
commands.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsAdmin -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
<class name="HdfsDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataInputStream" type="org.apache.hadoop.crypto.CryptoInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getWrappedStream" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get a reference to the wrapped input stream. We always want to return the
actual underlying InputStream, even when we're using a CryptoStream, e.g.
in the delegated methods below.
@return the underlying input stream]]>
</doc>
</method>
<method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the datanode from which the stream is currently reading.]]>
</doc>
</method>
<method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the block containing the target position.]]>
</doc>
</method>
<method name="getAllBlocks" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the collection of blocks that have already been located.]]>
</doc>
</method>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the visible length of the file. It includes the length of the last
block even if that block is still under construction.
@return The visible length of the file.]]>
</doc>
</method>
<method name="getReadStatistics" return="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get statistics about the reads which this DFSInputStream has done.
Note that because HdfsDataInputStream is buffered, these stats may
be higher than you would expect just by adding up the number of
bytes read through HdfsDataInputStream.]]>
</doc>
</method>
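<!-- Usage sketch (illustrative; imports elided): reading per-stream
     statistics, assuming an open FileSystem `fs` backed by HDFS so that
     open() returns an HdfsDataInputStream; the ReadStatistics getters are
     assumed from the client API, not shown in this excerpt.

       FSDataInputStream in = fs.open(new Path("/data/file"));
       byte[] buf = new byte[8192];
       while (in.read(buf) != -1) {
         // drain the stream so the statistics have something to count
       }
       DFSInputStream.ReadStatistics stats =
           ((HdfsDataInputStream) in).getReadStatistics();
       System.out.println("total: " + stats.getTotalBytesRead()
           + " local: " + stats.getTotalLocalBytesRead()
           + " short-circuit: " + stats.getTotalShortCircuitBytesRead());
       in.close();
-->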
<method name="clearReadStatistics"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The Hdfs implementation of {@link FSDataInputStream}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
<class name="HdfsDataOutputStream" extends="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getCurrentBlockReplication" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the actual number of replicas of the current block.
This can be different from the designated replication factor of the file
because the namenode does not maintain replication for the blocks which are
currently being written to. Depending on the configuration, the client may
continue to write to a block even if a few datanodes in the write pipeline
have failed, or the client may add new datanodes once a datanode has
failed.
@return the number of valid replicas of the current block]]>
</doc>
</method>
<method name="hsync"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="syncFlags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Sync buffered data to DataNodes (flush to disk devices).
@param syncFlags
Indicates the detailed semantics and actions of the hsync.
@throws IOException
@see FSDataOutputStream#hsync()]]>
</doc>
</method>
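<!-- Usage sketch (illustrative; imports elided): a durable sync that also
     updates the file length on the NameNode, assuming an open FileSystem
     `fs` backed by HDFS so the created stream is an HdfsDataOutputStream;
     the SyncFlag nested enum is assumed from the client API.

       FSDataOutputStream out = fs.create(new Path("/logs/app.log"));
       out.write("event\n".getBytes(StandardCharsets.UTF_8));
       ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
       out.close();
-->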
<doc>
<![CDATA[The Hdfs implementation of {@link FSDataOutputStream}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsUtils -->
<class name="HdfsUtils" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsUtils"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isHealthy" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<doc>
<![CDATA[Is the HDFS healthy?
HDFS is considered healthy if it is up and not in safemode.
@param uri the HDFS URI. Note that the URI path is ignored.
@return true if HDFS is healthy; false otherwise.]]>
</doc>
</method>
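<!-- Usage sketch (illustrative; imports elided): a cheap liveness probe
     before starting work against the cluster.

       if (!HdfsUtils.isHealthy(URI.create("hdfs://nn:8020"))) {
         throw new IllegalStateException("HDFS is down or in safemode");
       }
-->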
<doc>
<![CDATA[The public utility API for HDFS.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsUtils -->
</package>
<package name="org.apache.hadoop.hdfs.inotify">
<!-- start class org.apache.hadoop.hdfs.inotify.Event -->
<class name="Event" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event" type="org.apache.hadoop.hdfs.inotify.Event.EventType"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getEventType" return="org.apache.hadoop.hdfs.inotify.Event.EventType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Events sent by the inotify system. Note that no events are necessarily sent
when a file is opened for read (although a MetadataUpdateEvent will be sent
if the atime is updated).]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event -->
<!-- start class org.apache.hadoop.hdfs.inotify.EventBatch -->
<class name="EventBatch" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EventBatch" type="long, org.apache.hadoop.hdfs.inotify.Event[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getEvents" return="org.apache.hadoop.hdfs.inotify.Event[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A batch of events that all happened on the same transaction ID.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.EventBatch -->
<!-- start class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
<class name="MissingEventsException" extends="java.lang.Exception"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="MissingEventsException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="MissingEventsException" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getExpectedTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getActualTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
</package>
<package name="org.apache.hadoop.hdfs.net">
</package>
<package name="org.apache.hadoop.hdfs.protocol">
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry -->
<class name="CacheDirectiveEntry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CacheDirectiveEntry" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo, org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStats" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes a path-based cache directive entry.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo -->
<class name="CacheDirectiveInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getId" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The ID of this directive.]]>
</doc>
</method>
<method name="getPath" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The path used in this request.]]>
</doc>
</method>
<method name="getReplication" return="java.lang.Short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of times the block should be cached.]]>
</doc>
</method>
<method name="getPool" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The pool used in this request.]]>
</doc>
</method>
<method name="getExpiration" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return When this directive expires.]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes a path-based cache directive.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveStats -->
<class name="CacheDirectiveStats" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getBytesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The bytes needed.]]>
</doc>
</method>
<method name="getBytesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The bytes cached.]]>
</doc>
</method>
<method name="getFilesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of files needed.]]>
</doc>
</method>
<method name="getFilesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of files cached.]]>
</doc>
</method>
<method name="hasExpired" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Whether this directive has expired.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes the statistics of a path-based cache directive.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveStats -->
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolEntry -->
<class name="CachePoolEntry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CachePoolEntry" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo, org.apache.hadoop.hdfs.protocol.CachePoolStats"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStats" return="org.apache.hadoop.hdfs.protocol.CachePoolStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes a cache pool entry.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolEntry -->
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolInfo -->
<class name="CachePoolInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CachePoolInfo" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPoolName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Name of the pool.]]>
</doc>
</method>
<method name="getOwnerName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The owner of the pool. Along with the group and mode, determines
who has access to view and modify the pool.]]>
</doc>
</method>
<method name="setOwnerName" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ownerName" type="java.lang.String"/>
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The group of the pool. Along with the owner and mode, determines
who has access to view and modify the pool.]]>
</doc>
</method>
<method name="setGroupName" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="groupName" type="java.lang.String"/>
</method>
<method name="getMode" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Unix-style permissions of the pool. Along with the owner and group,
determines who has access to view and modify the pool.]]>
</doc>
</method>
<method name="setMode" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="mode" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="getLimit" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The maximum aggregate number of bytes that can be cached by
directives in this pool.]]>
</doc>
</method>
<method name="setLimit" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytes" type="java.lang.Long"/>
</method>
<method name="getMaxRelativeExpiryMs" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The maximum relative expiration of directives of this pool in
milliseconds]]>
</doc>
</method>
<method name="setMaxRelativeExpiryMs" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ms" type="java.lang.Long"/>
<doc>
<![CDATA[Set the maximum relative expiration of directives of this pool in
milliseconds.
@param ms in milliseconds
@return This builder, for call chaining.]]>
</doc>
</method>
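<!-- Usage sketch (illustrative; imports elided): the setters above return
     this, so a pool description can be built by chaining; the values are
     examples only.

       CachePoolInfo pool = new CachePoolInfo("hot-tables")
           .setOwnerName("etl")
           .setGroupName("analytics")
           .setMode(new FsPermission((short) 0755))
           .setLimit(64L * 1024 * 1024 * 1024)               // 64 GB aggregate
           .setMaxRelativeExpiryMs(7L * 24 * 60 * 60 * 1000); // one week
-->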
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="validate"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="validateName"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="poolName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="RELATIVE_EXPIRY_NEVER" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Indicates that the pool does not have a maximum relative expiry.]]>
</doc>
</field>
<field name="DEFAULT_MAX_RELATIVE_EXPIRY" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default max relative expiry for cache pools.]]>
</doc>
</field>
<field name="LIMIT_UNLIMITED" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DEFAULT_LIMIT" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[CachePoolInfo describes a cache pool.
This class is used in RPCs to create and modify cache pools.
It is serializable and can be stored in the edit log.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolInfo -->
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolStats -->
<class name="CachePoolStats" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getBytesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesOverlimit" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFilesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFilesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[CachePoolStats describes cache pool statistics.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolStats -->
<!-- start class org.apache.hadoop.hdfs.protocol.EncryptionZone -->
<class name="EncryptionZone" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EncryptionZone" type="long, java.lang.String, org.apache.hadoop.crypto.CipherSuite, org.apache.hadoop.crypto.CryptoProtocolVersion, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSuite" return="org.apache.hadoop.crypto.CipherSuite"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getVersion" return="org.apache.hadoop.crypto.CryptoProtocolVersion"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getKeyName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
      <![CDATA[A simple class for representing an encryption zone. An encryption zone
 has a path (the root of the encryption zone), a cipher suite, a crypto
 protocol version, a key name, and a unique id. The id is used to implement
 batched listing of encryption zones.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.EncryptionZone -->
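<!-- Usage sketch: iterating the encryption zones visible to an admin.
     HdfsAdmin#listEncryptionZones is assumed from the 2.7 client API; it is
     not part of this listing.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.RemoteIterator;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.EncryptionZone;

     static void listZones() throws Exception {
       Configuration conf = new Configuration();
       // hdfs://nn.example.com:8020 is a placeholder NameNode URI.
       HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nn.example.com:8020"), conf);
       RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
       while (zones.hasNext()) {
         EncryptionZone ez = zones.next();
         // The id orders zones for batched listing; path is the zone root.
         System.out.println(ez.getId() + " " + ez.getPath() + " key=" + ez.getKeyName());
       }
     }
-->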
</package>
<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
</package>
<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
</package>
<package name="org.apache.hadoop.hdfs.protocolPB">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.client">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.protocol">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.server">
<!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
<interface name="JournalNodeMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getJournalsStatus" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets status information (e.g., whether formatted) about the
 JournalNode's journals.
 @return a string representing the status of each journal]]>
</doc>
</method>
<doc>
      <![CDATA[This is the JMX management interface for JournalNode information.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
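<!-- Usage sketch: reading JournalsStatus with the standard javax.management
     client API. The JMX port and the
     "Hadoop:service=JournalNode,name=JournalNodeInfo" bean name are
     assumptions based on Hadoop's usual MBean registration pattern.

     import javax.management.MBeanServerConnection;
     import javax.management.ObjectName;
     import javax.management.remote.JMXConnector;
     import javax.management.remote.JMXConnectorFactory;
     import javax.management.remote.JMXServiceURL;

     static String journalsStatus(String host, int jmxPort) throws Exception {
       JMXServiceURL url = new JMXServiceURL(
           "service:jmx:rmi:///jndi/rmi://" + host + ":" + jmxPort + "/jmxrmi");
       try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
         MBeanServerConnection conn = jmxc.getMBeanServerConnection();
         ObjectName name =
             new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
         // A string describing each journal, per getJournalsStatus() above.
         return (String) conn.getAttribute(name, "JournalsStatus");
       }
     }
-->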
</package>
<package name="org.apache.hadoop.hdfs.security.token.block">
</package>
<package name="org.apache.hadoop.hdfs.security.token.delegation">
</package>
<package name="org.apache.hadoop.hdfs.server.balancer">
</package>
<package name="org.apache.hadoop.hdfs.server.blockmanagement">
</package>
<package name="org.apache.hadoop.hdfs.server.common">
</package>
<package name="org.apache.hadoop.hdfs.server.datanode">
<!-- start interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
<interface name="DataNodeMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getVersion" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the version of Hadoop.
@return the version of Hadoop]]>
</doc>
</method>
<method name="getRpcPort" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the RPC port.
 @return the RPC port]]>
</doc>
</method>
<method name="getHttpPort" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the HTTP port.
 @return the HTTP port]]>
</doc>
</method>
<method name="getNamenodeAddresses" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the namenode IP addresses.
 @return the namenode IP addresses that the datanode is talking to]]>
</doc>
</method>
<method name="getVolumeInfo" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets information about each volume on the Datanode. Please
 see the implementation for the format of the returned information.
 @return the volume info]]>
</doc>
</method>
<method name="getClusterId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the cluster id.
@return the cluster id]]>
</doc>
</method>
<method name="getXceiverCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns an estimate of the number of Datanode threads
actively transferring blocks.]]>
</doc>
</method>
<method name="getDatanodeNetworkCounts" return="java.util.Map"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the network error counts on a per-Datanode basis.]]>
</doc>
</method>
<doc>
      <![CDATA[This is the JMX management interface for data node information.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
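<!-- Usage sketch: reading DataNodeMXBean attributes through an MXBean proxy.
     The "Hadoop:service=DataNode,name=DataNodeInfo" bean name is an
     assumption based on Hadoop's usual MBean registration pattern.

     import javax.management.JMX;
     import javax.management.MBeanServerConnection;
     import javax.management.ObjectName;
     import javax.management.remote.JMXConnector;
     import javax.management.remote.JMXConnectorFactory;
     import javax.management.remote.JMXServiceURL;
     import org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean;

     static void printDataNodeInfo(String host, int jmxPort) throws Exception {
       JMXServiceURL url = new JMXServiceURL(
           "service:jmx:rmi:///jndi/rmi://" + host + ":" + jmxPort + "/jmxrmi");
       try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
         MBeanServerConnection conn = jmxc.getMBeanServerConnection();
         ObjectName name = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
         DataNodeMXBean dn = JMX.newMXBeanProxy(conn, name, DataNodeMXBean.class);
         System.out.println("version   = " + dn.getVersion());
         System.out.println("clusterId = " + dn.getClusterId());
         System.out.println("xceivers  = " + dn.getXceiverCount());
       }
     }
-->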
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.web">
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
</package>
<package name="org.apache.hadoop.hdfs.server.mover">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode">
<!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
<interface name="AuditLogger" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="initialize"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Called during initialization of the logger.
@param conf The configuration object.]]>
</doc>
</method>
<method name="logAuditEvent"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="succeeded" type="boolean"/>
<param name="userName" type="java.lang.String"/>
<param name="addr" type="java.net.InetAddress"/>
<param name="cmd" type="java.lang.String"/>
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
<doc>
<![CDATA[Called to log an audit event.
<p>
This method must return as quickly as possible, since it's called
in a critical section of the NameNode's operation.
@param succeeded Whether authorization succeeded.
@param userName Name of the user executing the request.
@param addr Remote address of the request.
@param cmd The requested command.
@param src Path of affected source file.
@param dst Path of affected destination file (if any).
@param stat File information for operations that change the file's
metadata (permissions, owner, times, etc).]]>
</doc>
</method>
<doc>
<![CDATA[Interface defining an audit logger.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
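<!-- Implementation sketch: a trivial AuditLogger. Registering it via the
     dfs.namenode.audit.loggers configuration key is an assumption from the
     2.7 NameNode configuration; only the interface itself is listed here.

     import java.net.InetAddress;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileStatus;
     import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

     public class StdoutAuditLogger implements AuditLogger {
       @Override
       public void initialize(Configuration conf) {
         // No configuration needed for this sketch.
       }

       @Override
       public void logAuditEvent(boolean succeeded, String userName,
           InetAddress addr, String cmd, String src, String dst,
           FileStatus stat) {
         // Keep this fast: it runs in a critical section of the NameNode.
         System.out.printf("allowed=%b user=%s ip=%s cmd=%s src=%s dst=%s%n",
             succeeded, userName, addr, cmd, src, dst);
       }
     }
-->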
<!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
<class name="HdfsAuditLogger" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
<constructor name="HdfsAuditLogger"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="logAuditEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="succeeded" type="boolean"/>
<param name="userName" type="java.lang.String"/>
<param name="addr" type="java.net.InetAddress"/>
<param name="cmd" type="java.lang.String"/>
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<param name="status" type="org.apache.hadoop.fs.FileStatus"/>
</method>
<method name="logAuditEvent"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="succeeded" type="boolean"/>
<param name="userName" type="java.lang.String"/>
<param name="addr" type="java.net.InetAddress"/>
<param name="cmd" type="java.lang.String"/>
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
<doc>
<![CDATA[Same as
{@link #logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)}
with additional parameters related to logging delegation token tracking
IDs.
@param succeeded Whether authorization succeeded.
@param userName Name of the user executing the request.
@param addr Remote address of the request.
@param cmd The requested command.
@param src Path of affected source file.
@param dst Path of affected destination file (if any).
@param stat File information for operations that change the file's metadata
(permissions, owner, times, etc).
@param ugi UserGroupInformation of the current user, or null if not logging
token tracking information
@param dtSecretManager The token secret manager, or null if not logging
token tracking information]]>
</doc>
</method>
<doc>
<![CDATA[Extension of {@link AuditLogger}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
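<!-- Implementation sketch: extending HdfsAuditLogger to use the token-aware
     overload. The authentication-method lookup on UserGroupInformation is an
     assumption; only the classes listed here are confirmed by this file.

     import java.net.InetAddress;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileStatus;
     import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
     import org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger;
     import org.apache.hadoop.security.UserGroupInformation;

     public class TokenAwareAuditLogger extends HdfsAuditLogger {
       @Override
       public void initialize(Configuration conf) {
       }

       @Override
       public void logAuditEvent(boolean succeeded, String userName,
           InetAddress addr, String cmd, String src, String dst,
           FileStatus stat, UserGroupInformation ugi,
           DelegationTokenSecretManager dtSecretManager) {
         // ugi and dtSecretManager may be null when token tracking is off.
         String auth = (ugi == null) ? "-" : String.valueOf(ugi.getAuthenticationMethod());
         System.out.printf("allowed=%b user=%s auth=%s cmd=%s src=%s%n",
             succeeded, userName, auth, cmd, src);
       }
     }
-->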
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
<class name="INodeAttributeProvider" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="INodeAttributeProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="start"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Initialize the provider. This method is called at NameNode startup
time.]]>
</doc>
</method>
<method name="stop"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Shut down the provider. This method is called at NameNode shutdown time.]]>
</doc>
</method>
<method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fullPath" type="java.lang.String"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
</method>
<method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pathElements" type="java.lang.String[]"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
</method>
<method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
<doc>
      <![CDATA[Can be overridden by implementations to provide a custom
 AccessControlEnforcer that supplies an alternate implementation of the
 default permission-checking logic.
@param defaultEnforcer The Default AccessControlEnforcer
@return The AccessControlEnforcer to use]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
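<!-- Implementation sketch: a pass-through INodeAttributeProvider. Plugging it
     in via the dfs.namenode.inode.attributes.provider.class key is an
     assumption from the 2.7 NameNode configuration.

     import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
     import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

     public class PassthroughAttributeProvider extends INodeAttributeProvider {
       @Override
       public void start() {
         // Called once at NameNode startup; open external connections here.
       }

       @Override
       public void stop() {
         // Called at NameNode shutdown; release resources here.
       }

       @Override
       public INodeAttributes getAttributes(String[] pathElements,
           INodeAttributes inode) {
         // Return the stored attributes unchanged; a real provider could
         // substitute ownership or ACLs fetched from an external service.
         return inode;
       }
     }
-->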
<!-- start interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
<interface name="NameNodeMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getVersion" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the version of Hadoop.
@return the version]]>
</doc>
</method>
<method name="getSoftwareVersion" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the version of the software running on the Namenode.
 @return a string representing the version]]>
</doc>
</method>
<method name="getUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the space used by data nodes.
 @return the space used by data nodes]]>
</doc>
</method>
<method name="getFree" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total unused raw bytes.
 @return the total unused raw bytes]]>
</doc>
</method>
<method name="getTotal" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets total raw bytes including non-dfs used space.
@return the total raw bytes including non-dfs used space]]>
</doc>
</method>
<method name="getSafemode" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the safemode status.
 @return the safemode status]]>
</doc>
</method>
<method name="isUpgradeFinalized" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Checks if upgrade is finalized.
 @return true if the upgrade is finalized]]>
</doc>
</method>
<method name="getRollingUpgradeStatus" return="org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo.Bean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the rolling upgrade information.
 @return rolling upgrade information if an upgrade is in progress; otherwise
 (e.g., if there is no upgrade or the upgrade is finalized), returns null.]]>
</doc>
</method>
<method name="getNonDfsUsedSpace" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total space used by data nodes for non-DFS purposes, such as
 storing temporary files on the local file system.
 @return the non-DFS space of the cluster]]>
</doc>
</method>
<method name="getPercentUsed" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total space used by data nodes as a percentage of total capacity.
 @return the percentage of used space on the cluster.]]>
</doc>
</method>
<method name="getPercentRemaining" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total remaining space on data nodes as a percentage of total
 capacity.
 @return the percentage of remaining space on the cluster]]>
</doc>
</method>
<method name="getCacheUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Returns the amount of cache used by data nodes (in bytes).]]>
</doc>
</method>
<method name="getCacheCapacity" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Returns the total cache capacity of data nodes (in bytes).]]>
</doc>
</method>
<method name="getBlockPoolUsedSpace" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total space used by the block pools of this namenode.]]>
</doc>
</method>
<method name="getPercentBlockPoolUsed" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total space used by the block pool as a percentage of total capacity.]]>
</doc>
</method>
<method name="getTotalBlocks" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total number of blocks on the cluster.
 @return the total number of blocks on the cluster]]>
</doc>
</method>
<method name="getTotalFiles" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total number of files on the cluster.
@return the total number of files on the cluster]]>
</doc>
</method>
<method name="getNumberOfMissingBlocks" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total number of missing blocks on the cluster.
@return the total number of missing blocks on the cluster]]>
</doc>
</method>
<method name="getNumberOfMissingBlocksWithReplicationFactorOne" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the total number of missing blocks on the cluster with
 replication factor 1.
 @return the total number of missing blocks on the cluster with
 replication factor 1]]>
</doc>
</method>
<method name="getThreads" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the number of threads.
@return the number of threads]]>
</doc>
</method>
<method name="getLiveNodes" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the live node information of the cluster.
@return the live node information]]>
</doc>
</method>
<method name="getDeadNodes" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the dead node information of the cluster.
@return the dead node information]]>
</doc>
</method>
<method name="getDecomNodes" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the decommissioning node information of the cluster.
@return the decommissioning node information]]>
</doc>
</method>
<method name="getClusterId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the cluster id.
@return the cluster id]]>
</doc>
</method>
<method name="getBlockPoolId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the block pool id.
@return the block pool id]]>
</doc>
</method>
<method name="getNameDirStatuses" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets status information about the directories storing the image and edit
 logs of the NN.
 @return the name dir status information, as a JSON string.]]>
</doc>
</method>
<method name="getNodeUsage" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the max, median, min, and standard deviation of DataNode usage.
@return the DataNode usage information, as a JSON string.]]>
</doc>
</method>
<method name="getNameJournalStatus" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get status information about the journals of the NN.
@return the name journal status information, as a JSON string.]]>
</doc>
</method>
<method name="getJournalTransactionInfo" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets information about the transaction ID, including the last applied
 transaction ID and the most recent checkpoint's transaction ID.]]>
</doc>
</method>
<method name="getNNStarted" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the NN start time.
@return the NN start time]]>
</doc>
</method>
<method name="getCompileInfo" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the compilation information, which contains the date, user, and branch.
@return the compilation information, as a JSON string.]]>
</doc>
</method>
<method name="getCorruptFiles" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the list of corrupt files.
@return the list of corrupt files, as a JSON string.]]>
</doc>
</method>
<method name="getDistinctVersionCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the number of distinct versions of live datanodes.
@return the number of distinct versions of live datanodes]]>
</doc>
</method>
<method name="getDistinctVersions" return="java.util.Map"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the number of live datanodes for each distinct version.
 @return the number of live datanodes for each distinct version]]>
</doc>
</method>
<doc>
      <![CDATA[This is the JMX management interface for namenode information.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
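<!-- Usage sketch: the same attributes are exposed as JSON by the NameNode web
     UI under /jmx. The NameNodeInfo bean name and the /jmx servlet are
     assumptions based on Hadoop's usual conventions.

     import java.io.BufferedReader;
     import java.io.InputStreamReader;
     import java.net.URL;
     import java.nio.charset.StandardCharsets;

     static String nameNodeInfoJson(String nnHttpAddress) throws Exception {
       // e.g. nnHttpAddress = "nn.example.com:50070" (placeholder).
       URL url = new URL("http://" + nnHttpAddress
           + "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
       StringBuilder out = new StringBuilder();
       try (BufferedReader in = new BufferedReader(
           new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
         String line;
         while ((line = in.readLine()) != null) {
           out.append(line).append('\n');
         }
       }
       return out.toString();
     }
-->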
<!-- start interface org.apache.hadoop.hdfs.server.namenode.NameNodeStatusMXBean -->
<interface name="NameNodeStatusMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getNNRole" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the NameNode role.
@return the NameNode role.]]>
</doc>
</method>
<method name="getState" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the NameNode state.
@return the NameNode state.]]>
</doc>
</method>
<method name="getHostAndPort" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Gets the host and port, colon-separated.
 @return the host and port, colon-separated]]>
</doc>
</method>
<method name="isSecurityEnabled" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Checks whether security is enabled.
 @return true if security is enabled]]>
</doc>
</method>
<method name="getLastHATransitionTime" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the most recent HA transition time in milliseconds from the epoch.
@return the most recent HA transition time in milliseconds from the epoch.]]>
</doc>
</method>
<doc>
      <![CDATA[This is the JMX management interface for NameNode status information.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.NameNodeStatusMXBean -->
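<!-- Usage sketch: checking HA state through NameNodeStatusMXBean. The
     "Hadoop:service=NameNode,name=NameNodeStatus" bean name and the lowercase
     "active" state string are assumptions.

     import javax.management.JMX;
     import javax.management.MBeanServerConnection;
     import javax.management.ObjectName;
     import javax.management.remote.JMXConnector;
     import javax.management.remote.JMXConnectorFactory;
     import javax.management.remote.JMXServiceURL;
     import org.apache.hadoop.hdfs.server.namenode.NameNodeStatusMXBean;

     static boolean isActive(String host, int jmxPort) throws Exception {
       JMXServiceURL url = new JMXServiceURL(
           "service:jmx:rmi:///jndi/rmi://" + host + ":" + jmxPort + "/jmxrmi");
       try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
         MBeanServerConnection conn = jmxc.getMBeanServerConnection();
         ObjectName name =
             new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
         NameNodeStatusMXBean status =
             JMX.newMXBeanProxy(conn, name, NameNodeStatusMXBean.class);
         return "active".equals(status.getState());
       }
     }
-->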
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.ha">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.top">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
</package>
<package name="org.apache.hadoop.hdfs.server.protocol">
</package>
<package name="org.apache.hadoop.hdfs.shortcircuit">
</package>
<package name="org.apache.hadoop.hdfs.tools">
</package>
<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
</package>
<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
</package>
<package name="org.apache.hadoop.hdfs.tools.snapshot">
</package>
<package name="org.apache.hadoop.hdfs.util">
</package>
<package name="org.apache.hadoop.hdfs.web">
</package>
<package name="org.apache.hadoop.hdfs.web.resources">
</package>
</api>