hadoop/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml

<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!-- Generated by the JDiff Javadoc doclet -->
<!-- (http://www.jdiff.org) -->
<!-- on Thu Aug 18 16:02:32 PDT 2016 -->
<api
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
xsi:noNamespaceSchemaLocation='api.xsd'
name="Apache Hadoop HDFS 2.7.2"
jdversion="1.0.9">
<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_92.jdk/Contents/Home/lib/tools.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/wtan/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/wtan/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/wtan/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/wtan/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/wtan/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/wtan/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/wtan/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/wtan/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/wtan/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/wtan/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/wtan/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/wtan/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/wtan/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/wtan/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/wtan/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/wtan/.m2/repository/
com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/wtan/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/wtan/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/wtan/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/wtan/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/wtan/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/wtan/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/wtan/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/wtan/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/wtan/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/wtan/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/wtan/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/wtan/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/wtan/.m2/repository/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/Users/wtan/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/wtan/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/wtan/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/wtan/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/wtan/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/wtan/.m2/repository/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/Users/wtan/.m2/repository/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/Users/wtan/.m2/repository/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/Users/wtan/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/wtan/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.7.2 -->
<package name="org.apache.hadoop.fs">
<!-- start class org.apache.hadoop.fs.BlockStorageLocation -->
<class name="BlockStorageLocation" extends="org.apache.hadoop.fs.BlockLocation"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockStorageLocation" type="org.apache.hadoop.fs.BlockLocation, org.apache.hadoop.fs.VolumeId[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getVolumeIds" return="org.apache.hadoop.fs.VolumeId[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the list of {@link VolumeId} corresponding to the block's replicas.
@return the list of VolumeId for the block's replicas]]>
</doc>
</method>
<doc>
<![CDATA[Wrapper for {@link BlockLocation} that also adds {@link VolumeId} volume
location information for each replica.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.BlockStorageLocation -->
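<!-- Usage sketch: consuming the per-replica volume information described above.
Obtaining BlockStorageLocation instances via
DistributedFileSystem#getFileBlockStorageLocations(List<BlockLocation>) is an
assumption about the surrounding client API (the feature also has to be enabled
on the datanodes); the rest only uses the methods documented here.

  import java.io.IOException;
  import java.util.Arrays;
  import org.apache.hadoop.fs.BlockLocation;
  import org.apache.hadoop.fs.BlockStorageLocation;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.VolumeId;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class VolumeReport {
    public static void print(DistributedFileSystem dfs, Path file) throws IOException {
      BlockLocation[] blocks =
          dfs.getFileBlockLocations(dfs.getFileStatus(file), 0, Long.MAX_VALUE);
      // Assumed entry point that enriches BlockLocations with per-replica VolumeIds.
      BlockStorageLocation[] locations = dfs.getFileBlockStorageLocations(Arrays.asList(blocks));
      for (BlockStorageLocation loc : locations) {
        for (VolumeId id : loc.getVolumeIds()) {
          System.out.println("block at offset " + loc.getOffset() + " has a replica on volume " + id);
        }
      }
    }
  }
-->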
<!-- start class org.apache.hadoop.fs.CacheFlag -->
<class name="CacheFlag" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.fs.CacheFlag[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.fs.CacheFlag"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Specifies semantics for CacheDirective operations. Multiple flags can
be combined in an EnumSet.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.CacheFlag -->
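<!-- Usage sketch: CacheFlag values are combined in an EnumSet as described above.
The FORCE constant is an assumption; JDiff does not list the enum constants here.

  import java.util.EnumSet;
  import org.apache.hadoop.fs.CacheFlag;

  public class CacheFlagExample {
    // An EnumSet carrying the flags for a CacheDirective operation.
    public static final EnumSet<CacheFlag> FLAGS = EnumSet.of(CacheFlag.FORCE);  // assumed constant
  }
-->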
<!-- start class org.apache.hadoop.fs.HdfsVolumeId -->
<class name="HdfsVolumeId" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.VolumeId"/>
<constructor name="HdfsVolumeId" type="byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="arg0" type="org.apache.hadoop.fs.VolumeId"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[HDFS-specific volume identifier which implements {@link VolumeId}. Can be
used to differentiate between the data directories on a single datanode. This
identifier is only unique on a per-datanode basis.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.HdfsVolumeId -->
<!-- start interface org.apache.hadoop.fs.VolumeId -->
<interface name="VolumeId" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.lang.Comparable"/>
<method name="compareTo" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="arg0" type="org.apache.hadoop.fs.VolumeId"/>
</method>
<method name="hashCode" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
<![CDATA[Opaque interface that identifies a disk location. Subclasses
should implement {@link Comparable} and override both equals and hashCode.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.fs.VolumeId -->
<!-- start class org.apache.hadoop.fs.XAttr.Builder -->
<class name="XAttr.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setNameSpace" return="org.apache.hadoop.fs.XAttr.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ns" type="org.apache.hadoop.fs.XAttr.NameSpace"/>
</method>
<method name="setName" return="org.apache.hadoop.fs.XAttr.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="setValue" return="org.apache.hadoop.fs.XAttr.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="value" type="byte[]"/>
</method>
<method name="build" return="org.apache.hadoop.fs.XAttr"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.fs.XAttr.Builder -->
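<!-- Usage sketch: building an XAttr with the fluent Builder above. The USER
namespace constant and the attribute name/value are illustrative assumptions.

  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.fs.XAttr;

  public class XAttrBuilderExample {
    public static XAttr build() {
      // Chain the setters documented above, then build() the XAttr.
      return new XAttr.Builder()
          .setNameSpace(XAttr.NameSpace.USER)                     // assumed enum constant
          .setName("example.policy")                              // hypothetical attribute name
          .setValue("crc32c".getBytes(StandardCharsets.UTF_8))    // hypothetical value
          .build();
    }
  }
-->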
<!-- start class org.apache.hadoop.fs.XAttr.NameSpace -->
<class name="XAttr.NameSpace" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.fs.XAttr.NameSpace[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.fs.XAttr.NameSpace"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.fs.XAttr.NameSpace -->
</package>
<package name="org.apache.hadoop.hdfs">
<!-- start interface org.apache.hadoop.hdfs.BlockReader -->
<interface name="BlockReader" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.ByteBufferReadable"/>
<method name="read" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="off" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="skip" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="n" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Skip the given number of bytes]]>
</doc>
</method>
<method name="available" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns an estimate of the number of bytes that can be read
(or skipped over) from this input stream without performing
network I/O.
This may return more than what is actually present in the block.]]>
</doc>
</method>
<method name="close"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Close the block reader.
@throws IOException]]>
</doc>
</method>
<method name="readFully"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="readOffset" type="int"/>
<param name="amtToRead" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read exactly the given amount of data, throwing an exception
if EOF is reached before that amount has been read.]]>
</doc>
</method>
<method name="readAll" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="offset" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Similar to {@link #readFully(byte[], int, int)} except that it will
not throw an exception on EOF. However, it differs from the simple
{@link #read(byte[], int, int)} call in that it is guaranteed to
read the data if it is available. In other words, if this call
does not throw an exception, then either the buffer has been
filled or the next call will return EOF.]]>
</doc>
</method>
<method name="isLocal" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true only if this is a local read.]]>
</doc>
</method>
<method name="isShortCircuit" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true only if this is a short-circuit read.
All short-circuit reads are also local.]]>
</doc>
</method>
<method name="getClientMmap" return="org.apache.hadoop.hdfs.shortcircuit.ClientMmap"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="opts" type="java.util.EnumSet"/>
<doc>
<![CDATA[Get a ClientMmap object for this BlockReader.
@param opts The read options to use.
@return The ClientMmap object, or null if mmap is not
supported.]]>
</doc>
</method>
<doc>
<![CDATA[A BlockReader is responsible for reading a single block
from a single datanode.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.BlockReader -->
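<!-- Usage sketch: the readAll contract described above, expressed in terms of
read(). This is an illustrative helper under the documented semantics, not the
HDFS implementation: keep reading until the requested length is satisfied or EOF
is hit, and return the number of bytes actually read instead of throwing.

  import java.io.IOException;
  import org.apache.hadoop.hdfs.BlockReader;

  public final class ReadAllSketch {
    public static int readAll(BlockReader reader, byte[] buf, int offset, int len)
        throws IOException {
      int total = 0;
      while (total < len) {
        int n = reader.read(buf, offset + total, len - total);
        if (n < 0) {
          // EOF before len bytes: return what was read, no exception.
          break;
        }
        total += n;
      }
      return total;
    }
  }
-->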
<!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
<class name="BlockReaderFactory.BlockReaderPeer" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</class>
<!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
<!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector -->
<class name="BlockReaderFactory.FailureInjector" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FailureInjector"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="injectRequestFileDescriptorsFailure"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getSupportsReceiptVerification" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector -->
<!-- start class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
<class name="CorruptFileBlockIterator" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.RemoteIterator"/>
<constructor name="CorruptFileBlockIterator" type="org.apache.hadoop.hdfs.DFSClient, org.apache.hadoop.fs.Path"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getCallsMade" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the number of calls made to the DFSClient.
This is for debugging and testing purposes.]]>
</doc>
</method>
<method name="hasNext" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="next" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[Provides an iterator interface for listCorruptFileBlocks.
This class is used by DistributedFileSystem and Hdfs.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
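<!-- Usage sketch: draining a RemoteIterator of corrupt-file paths with the
hasNext()/next() contract shown above. Obtaining the iterator from
DistributedFileSystem#listCorruptFileBlocks(Path) is an assumption about the
surrounding API; per the javadoc above, DistributedFileSystem is one of the
callers of this iterator class.

  import java.io.IOException;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class CorruptFilesExample {
    public static void printCorruptFiles(DistributedFileSystem dfs, Path root) throws IOException {
      RemoteIterator<Path> it = dfs.listCorruptFileBlocks(root);  // assumed entry point
      while (it.hasNext()) {
        System.out.println("file with corrupt blocks: " + it.next());
      }
    }
  }
-->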
<!-- start class org.apache.hadoop.hdfs.DFSClient.Conf -->
<class name="DFSClient.Conf" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Conf" type="org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isUseLegacyBlockReaderLocal" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDomainSocketPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isShortCircuitLocalReads" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isDomainSocketDataTraffic" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="brfFailureInjector" type="org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[DFSClient configuration]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSClient.Conf -->
<!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
<class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.hdfs.client.HdfsDataInputStream"
abstract="false"
static="true" final="false" visibility="public"
deprecated="use {@link HdfsDataInputStream} instead.">
<constructor name="DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<doc>
<![CDATA[@deprecated use {@link HdfsDataInputStream} instead.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
<!-- start class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
<class name="DFSHedgedReadMetrics" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DFSHedgedReadMetrics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="incHedgedReadOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incHedgedReadOpsInCurThread"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incHedgedReadWins"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHedgedReadOps" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHedgedReadOpsInCurThread" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHedgedReadWins" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="hedgedReadOps" type="java.util.concurrent.atomic.AtomicLong"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="hedgedReadOpsWin" type="java.util.concurrent.atomic.AtomicLong"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="hedgedReadOpsInCurThread" type="java.util.concurrent.atomic.AtomicLong"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[The client-side metrics for the hedged read feature.
This class has a number of metrics variables that are publicly accessible,
so that client-side consumers such as HBase can read them.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
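<!-- Usage sketch: reading the hedged-read counters documented above and deriving
a win rate. How the DFSHedgedReadMetrics instance is obtained is left open here;
only the getters listed in this class are used.

  import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;

  public class HedgedReadReport {
    public static String summarize(DFSHedgedReadMetrics metrics) {
      long ops = metrics.getHedgedReadOps();
      long wins = metrics.getHedgedReadWins();
      long inCaller = metrics.getHedgedReadOpsInCurThread();
      double winRate = (ops == 0) ? 0.0 : (double) wins / ops;
      return String.format("hedged ops=%d, wins=%d (%.1f%%), ran in calling thread=%d",
          ops, wins, winRate * 100.0, inCaller);
    }
  }
-->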
<!-- start class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
<class name="DFSInotifyEventInputStream" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="poll" return="org.apache.hadoop.hdfs.inotify.EventBatch"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next batch of events in the stream or null if no new
batches are currently available.
@throws IOException because of a network error or edit log
corruption. Also possible if JournalNodes are unresponsive in the
QJM setting (even one unresponsive JournalNode is enough in rare cases),
so catching this exception and retrying at least a few times is
recommended.
@throws MissingEventsException if we cannot return the next batch in the
stream because the data for the events (and possibly some subsequent
events) has been deleted (generally because this stream is a very large
number of transactions behind the current state of the NameNode). It is
safe to continue reading from the stream after this exception is thrown.
The next available batch of events will be returned.]]>
</doc>
</method>
<method name="getTxidsBehindEstimate" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return an estimate of how many transaction IDs behind the NameNode's
current state this stream is. Clients should periodically call this method
and check if its result is steadily increasing, which indicates that they
are falling behind (i.e. transactions are being generated faster than the
client is reading them). If a client falls too far behind, events may be
deleted before the client can read them.
<p/>
A return value of -1 indicates that an estimate could not be produced, and
should be ignored. The value returned by this method is really only useful
when compared to previous or subsequent returned values.]]>
</doc>
</method>
<method name="poll" return="org.apache.hadoop.hdfs.inotify.EventBatch"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="time" type="long"/>
<param name="tu" type="java.util.concurrent.TimeUnit"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next event batch in the stream, waiting up to the specified
amount of time for a new batch. Returns null if one is not available at the
end of the specified amount of time. The time before the method returns may
exceed the specified amount of time by up to the time required for an RPC
to the NameNode.
@param time number of units of the given TimeUnit to wait
@param tu the desired TimeUnit
@throws IOException see {@link DFSInotifyEventInputStream#poll()}
@throws MissingEventsException
see {@link DFSInotifyEventInputStream#poll()}
@throws InterruptedException if the calling thread is interrupted]]>
</doc>
</method>
<method name="take" return="org.apache.hadoop.hdfs.inotify.EventBatch"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next batch of events in the stream, waiting indefinitely if
a new batch is not immediately available.
@throws IOException see {@link DFSInotifyEventInputStream#poll()}
@throws MissingEventsException see
{@link DFSInotifyEventInputStream#poll()}
@throws InterruptedException if the calling thread is interrupted]]>
</doc>
</method>
<field name="LOG" type="org.slf4j.Logger"
transient="false" volatile="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Stream for reading inotify events. DFSInotifyEventInputStreams should not
be shared among multiple threads.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
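<!-- Usage sketch: a polling loop over the inotify stream above. Obtaining the
stream from org.apache.hadoop.hdfs.client.HdfsAdmin#getInotifyEventStream() is an
assumption about the surrounding API; the poll(time, unit), getTxidsBehindEstimate()
and MissingEventsException handling follow the javadoc above.

  import java.io.IOException;
  import java.net.URI;
  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
  import org.apache.hadoop.hdfs.client.HdfsAdmin;
  import org.apache.hadoop.hdfs.inotify.EventBatch;
  import org.apache.hadoop.hdfs.inotify.MissingEventsException;

  public class InotifyTail {
    public static void tail(URI nameNodeUri) throws IOException, InterruptedException {
      HdfsAdmin admin = new HdfsAdmin(nameNodeUri, new Configuration());
      DFSInotifyEventInputStream stream = admin.getInotifyEventStream();  // assumed accessor
      while (true) {
        try {
          EventBatch batch = stream.poll(1, TimeUnit.SECONDS);  // null if no batch within 1s
          if (batch != null) {
            System.out.println("txid " + batch.getTxid() + ": " + batch.getEvents().length + " events");
          }
          System.out.println("estimated txids behind: " + stream.getTxidsBehindEstimate());
        } catch (MissingEventsException e) {
          // Some events were purged before being read; the javadoc above says it is
          // safe to keep reading from the stream after this exception.
          System.err.println("events were skipped: " + e.getMessage());
        }
      }
    }
  }
-->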
<!-- start class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
<class name="DFSInputStream.ReadStatistics" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReadStatistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ReadStatistics" type="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getTotalBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total bytes read. This will always be at least as
high as the other numbers, since it includes all of them.]]>
</doc>
</method>
<method name="getTotalLocalBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total local bytes read. This will always be at least
as high as totalShortCircuitBytesRead, since all short-circuit
reads are also local.]]>
</doc>
</method>
<method name="getTotalShortCircuitBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total short-circuit local bytes read.]]>
</doc>
</method>
<method name="getTotalZeroCopyBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total number of zero-copy bytes read.]]>
</doc>
</method>
<method name="getRemoteBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total number of bytes read which were not local.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
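<!-- Usage sketch: inspecting per-stream read statistics after draining a file.
Casting the stream returned by DistributedFileSystem#open to HdfsDataInputStream
and calling getReadStatistics() on it are assumptions about the client API; the
getters on ReadStatistics are the ones documented above.

  import java.io.IOException;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DFSInputStream;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

  public class ReadStatsExample {
    public static void report(DistributedFileSystem dfs, Path file) throws IOException {
      try (HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(file)) {  // assumed cast
        byte[] buf = new byte[64 * 1024];
        while (in.read(buf) != -1) {
          // Drain the file so the counters below reflect a full read.
        }
        DFSInputStream.ReadStatistics stats = in.getReadStatistics();  // assumed accessor
        System.out.println("total=" + stats.getTotalBytesRead()
            + " local=" + stats.getTotalLocalBytesRead()
            + " short-circuit=" + stats.getTotalShortCircuitBytesRead()
            + " zero-copy=" + stats.getTotalZeroCopyBytesRead()
            + " remote=" + stats.getRemoteBytesRead());
      }
    }
  }
-->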
<!-- start class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
<class name="DFSUtil.ConfiguredNNAddress" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getNameserviceId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getNamenodeId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAddress" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Represent one of the NameNodes configured in the cluster.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
<!-- start class org.apache.hadoop.hdfs.ExtendedBlockId -->
<class name="ExtendedBlockId" extends="java.lang.Object"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="ExtendedBlockId" type="long, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="fromExtendedBlock" return="org.apache.hadoop.hdfs.ExtendedBlockId"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
</method>
<method name="getBlockId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBlockPoolId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[An immutable key which identifies a block.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.ExtendedBlockId -->
<!-- start class org.apache.hadoop.hdfs.HAUtil -->
<class name="HAUtil" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="isHAEnabled" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<doc>
<![CDATA[Returns true if HA for the namenode is configured for the given nameservice.
@param conf Configuration
@param nsId nameservice, or null if no federated NS is configured
@return true if HA is configured in the configuration; else false.]]>
</doc>
</method>
<method name="usesSharedEditsDir" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Returns true if HA is using a shared edits directory.
@param conf Configuration
@return true if HA config is using a shared edits dir, false otherwise.]]>
</doc>
</method>
<method name="getNameNodeId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<doc>
<![CDATA[Get the namenode Id by matching the {@code addressKey}
with the address of the local node.
If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
configured, this method determines the namenode Id by matching the local
node's address with the configured addresses. When a match is found, it
returns the namenode Id from the corresponding configuration key.
@param conf Configuration
@return namenode Id on success, null on failure.
@throws HadoopIllegalArgumentException on error]]>
</doc>
</method>
<method name="getNameNodeIdFromAddress" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="address" type="java.net.InetSocketAddress"/>
<param name="keys" type="java.lang.String[]"/>
<doc>
<![CDATA[Similar to
{@link DFSUtil#getNameServiceIdFromAddress(Configuration,
InetSocketAddress, String...)}]]>
</doc>
</method>
<method name="getNameNodeIdOfOtherNode" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<doc>
<![CDATA[Get the NN ID of the other node in an HA setup.
@param conf the configuration of this node
@return the NN ID of the other node in this nameservice]]>
</doc>
</method>
<method name="getConfForOtherNode" return="org.apache.hadoop.conf.Configuration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="myConf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Given the configuration for this node, return a Configuration object for
the other node in an HA setup.
@param myConf the configuration of this node
@return the configuration of the other node in an HA setup]]>
</doc>
</method>
<method name="shouldAllowStandbyReads" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[This is used only by tests at the moment.
@return true if the NN should allow read operations while in standby mode.]]>
</doc>
</method>
<method name="setAllowStandbyReads"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="val" type="boolean"/>
</method>
<method name="isLogicalUri" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<doc>
<![CDATA[@return true if the given nameNodeUri appears to be a logical URI.]]>
</doc>
</method>
<method name="isClientFailoverConfigured" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<doc>
<![CDATA[Check whether the client has a failover proxy provider configured
for the namenode/nameservice.
@param conf Configuration
@param nameNodeUri The URI of namenode
@return true if failover is configured.]]>
</doc>
</method>
<method name="useLogicalUri" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Check whether logical URI is needed for the namenode and
the corresponding failover proxy provider in the config.
@param conf Configuration
@param nameNodeUri The URI of namenode
@return true if a logical URI is needed; false if not.
@throws IOException most likely due to misconfiguration.]]>
</doc>
</method>
<method name="getServiceUriFromToken" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="scheme" type="java.lang.String"/>
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<doc>
<![CDATA[Parse the file system URI out of the provided token.]]>
</doc>
</method>
<method name="buildTokenServiceForLogicalUri" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<param name="scheme" type="java.lang.String"/>
<doc>
<![CDATA[Get the service name used in the delegation token for the given logical
HA service.
@param uri the logical URI of the cluster
@param scheme the scheme of the corresponding FileSystem
@return the service name]]>
</doc>
</method>
<method name="isTokenForLogicalUri" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<doc>
<![CDATA[@return true if this token corresponds to a logical nameservice
rather than a specific namenode.]]>
</doc>
</method>
<method name="buildTokenServicePrefixForLogicalUri" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="scheme" type="java.lang.String"/>
</method>
<method name="cloneDelegationTokenForLogicalUri"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="haUri" type="java.net.URI"/>
<param name="nnAddrs" type="java.util.Collection"/>
<doc>
<![CDATA[Locate a delegation token associated with the given HA cluster URI, and if
one is found, clone it to also represent the underlying namenode address.
@param ugi the UGI to modify
@param haUri the logical URI for the cluster
@param nnAddrs collection of NNs in the cluster to which the token
applies]]>
</doc>
</method>
<method name="getAddressOfActive" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the internet address of the currently-active NN. This should rarely be
used, since callers of this method who connect directly to the NN using the
resulting InetSocketAddress will not be able to connect to the active NN if
a failover were to occur after this method has been called.
@param fs the file system to get the active address of.
@return the internet address of the currently-active NN.
@throws IOException if an error occurs while resolving the active NN.]]>
</doc>
</method>
<method name="getProxiesForAllNameNodesInNameservice" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
call should be made on every NN in an HA nameservice, not just the active.
@param conf configuration
@param nsId the nameservice to get all of the proxies for.
@return a list of RPC proxies for each NN in the nameservice.
@throws IOException in the event of error.]]>
</doc>
</method>
<method name="getProxiesForAllNameNodesInNameservice" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<param name="xface" type="java.lang.Class"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
call should be made on every NN in an HA nameservice, not just the active.
@param conf configuration
@param nsId the nameservice to get all of the proxies for.
@param xface the protocol class.
@return a list of RPC proxies for each NN in the nameservice.
@throws IOException in the event of error.]]>
</doc>
</method>
<method name="isAtLeastOneActive" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="namenodes" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Used to ensure that at least one of the given HA NNs is currently in the
active state.
@param namenodes list of RPC proxies for each NN to check.
@return true if at least one NN is active, false if all are in the standby state.
@throws IOException in the event of error.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.HAUtil -->
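<!-- Usage sketch: combining the HAUtil helpers above to check whether any
NameNode in an HA nameservice is active. The generic signatures (ClientProtocol
as the proxy interface, the ProxyAndInfo element type, and isAtLeastOneActive
taking the unwrapped proxies) are assumptions; JDiff only records the raw types.

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HAUtil;
  import org.apache.hadoop.hdfs.NameNodeProxies;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;

  public class HaProbe {
    public static boolean anyActive(Configuration conf, String nsId) throws IOException {
      if (!HAUtil.isHAEnabled(conf, nsId)) {
        return true;  // not an HA nameservice, nothing to probe
      }
      List<NameNodeProxies.ProxyAndInfo<ClientProtocol>> proxies =
          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);
      List<ClientProtocol> namenodes = new ArrayList<>();
      for (NameNodeProxies.ProxyAndInfo<ClientProtocol> p : proxies) {
        namenodes.add(p.getProxy());
      }
      return HAUtil.isAtLeastOneActive(namenodes);
    }
  }
-->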
<!-- start class org.apache.hadoop.hdfs.KeyProviderCache -->
<class name="KeyProviderCache" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="KeyProviderCache" type="long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="get" return="org.apache.hadoop.crypto.key.KeyProvider"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
</method>
<method name="setKeyProvider"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="keyProvider" type="org.apache.hadoop.crypto.key.KeyProvider"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.KeyProviderCache -->
<!-- start class org.apache.hadoop.hdfs.NameNodeProxies -->
<class name="NameNodeProxies" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NameNodeProxies"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="createProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates the namenode proxy with the passed protocol. This will handle
creation of either HA- or non-HA-enabled proxy objects, depending upon
whether the provided URI is a configured logical URI.
@param conf the configuration containing the required IPC
properties, client failover configurations, etc.
@param nameNodeUri the URI pointing either to a specific NameNode
or to a logical nameservice.
@param xface the IPC interface which should be created
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException if there is an error creating the proxy]]>
</doc>
</method>
<method name="createProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates the namenode proxy with the passed protocol. This will handle
creation of either HA- or non-HA-enabled proxy objects, depending upon
whether the provided URI is a configured logical URI.
@param conf the configuration containing the required IPC
properties, client failover configurations, etc.
@param nameNodeUri the URI pointing either to a specific NameNode
or to a logical nameservice.
@param xface the IPC interface which should be created
@param fallbackToSimpleAuth set to true or false during calls to indicate if
a secure client falls back to simple auth
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException if there is an error creating the proxy]]>
</doc>
</method>
<method name="createProxyWithLossyRetryHandler" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="config" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<param name="numResponseToDrop" type="int"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Generate a dummy namenode proxy instance that utilizes our hacked
{@link LossyRetryInvocationHandler}. Proxy instances generated using this
method will proactively drop RPC responses. Currently this method only
supports HA setups; null will be returned if the given configuration is not
for HA.
@param config the configuration containing the required IPC
properties, client failover configurations, etc.
@param nameNodeUri the URI pointing either to a specific NameNode
or to a logical nameservice.
@param xface the IPC interface which should be created
@param numResponseToDrop The number of responses to drop for each RPC call
@param fallbackToSimpleAuth set to true or false during calls to indicate if
a secure client falls back to simple auth
@return an object containing both the proxy and the associated
delegation token service it corresponds to. Will return null if the
given configuration does not support HA.
@throws IOException if there is an error creating the proxy]]>
</doc>
</method>
<method name="createNonHAProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nnAddr" type="java.net.InetSocketAddress"/>
<param name="xface" type="java.lang.Class"/>
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="withRetries" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the time you
don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
@param conf the configuration object
@param nnAddr address of the remote NN to connect to
@param xface the IPC interface which should be created
@param ugi the user who is making the calls on the proxy object
@param withRetries certain interfaces have a non-standard retry policy
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException]]>
</doc>
</method>
<method name="createNonHAProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nnAddr" type="java.net.InetSocketAddress"/>
<param name="xface" type="java.lang.Class"/>
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="withRetries" type="boolean"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the time you
don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
@param conf the configuration object
@param nnAddr address of the remote NN to connect to
@param xface the IPC interface which should be created
@param ugi the user who is making the calls on the proxy object
@param withRetries certain interfaces have a non-standard retry policy
@param fallbackToSimpleAuth set to true or false during this method to
indicate if a secure client falls back to simple auth
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException]]>
</doc>
</method>
<method name="getFailoverProxyProviderClass" return="java.lang.Class"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Gets the configured failover proxy provider's class.]]>
</doc>
</method>
<method name="createFailoverProxyProvider" return="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<param name="checkPort" type="boolean"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates the failover proxy provider instance.]]>
</doc>
</method>
<doc>
<![CDATA[Create proxy objects to communicate with a remote NN. All remote access to an
NN should be funneled through this class. Most of the time you'll want to use
{@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
create either an HA- or non-HA-enabled client proxy as appropriate.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.NameNodeProxies -->
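<!-- Usage sketch: the createProxy entry point described above, requesting a
ClientProtocol proxy for a (possibly logical, HA-enabled) URI. The logical URI,
the choice of ClientProtocol as the xface class, and the meaning of getStats()
index 0 are illustrative assumptions.

  import java.io.IOException;
  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.NameNodeProxies;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;

  public class ProxyExample {
    public static long capacity(Configuration conf) throws IOException {
      URI nn = URI.create("hdfs://mycluster");  // hypothetical logical nameservice URI
      // createProxy returns an HA or non-HA proxy depending on whether the URI is logical.
      NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
          NameNodeProxies.createProxy(conf, nn, ClientProtocol.class);
      ClientProtocol namenode = info.getProxy();
      return namenode.getStats()[0];  // assumed: index 0 of getStats() is total capacity
    }
  }
-->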
<!-- start class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
<class name="NameNodeProxies.ProxyAndInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ProxyAndInfo" type="PROXYTYPE, org.apache.hadoop.io.Text, java.net.InetSocketAddress"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getProxy" return="PROXYTYPE"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDelegationTokenService" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAddress" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Wrapper for a client proxy as well as its associated service ID.
This is simply used as a tuple-like return type for
{@link NameNodeProxies#createProxy} and
{@link NameNodeProxies#createNonHAProxy}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
<!-- start interface org.apache.hadoop.hdfs.RemotePeerFactory -->
<interface name="RemotePeerFactory" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="newConnectedPeer" return="org.apache.hadoop.hdfs.net.Peer"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="addr" type="java.net.InetSocketAddress"/>
<param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
<param name="datanodeId" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@param addr The address to connect to.
@param blockToken Token used during optional SASL negotiation
@param datanodeId ID of destination DataNode
@return A new Peer connected to the address.
@throws IOException If there was an error connecting or creating
the remote socket, encrypted stream, etc.]]>
</doc>
</method>
</interface>
<!-- end interface org.apache.hadoop.hdfs.RemotePeerFactory -->
<!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
<class name="UnknownCipherSuiteException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnknownCipherSuiteException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Thrown when an unknown cipher suite is encountered.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
<!-- start class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
<class name="UnknownCryptoProtocolVersionException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnknownCryptoProtocolVersionException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="UnknownCryptoProtocolVersionException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
</class>
<!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
<doc>
<![CDATA[<p>A distributed implementation of {@link
org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
<p>The most important difference is that unlike GFS, Hadoop DFS files
have strictly one writer at any one time. Bytes are always appended
to the end of the writer's stream. There is no notion of "record appends"
or "mutations" that are then checked or reordered. Writers simply emit
a byte stream. That byte stream is guaranteed to be stored in the
order written.</p>]]>
</doc>
</package>
<package name="org.apache.hadoop.hdfs.client">
<!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions -->
<class name="BlockReportOptions" extends="java.lang.Object"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="isIncremental" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Options that can be specified when manually triggering a block report.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions -->
<!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions.Factory -->
<class name="BlockReportOptions.Factory" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Factory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setIncremental" return="org.apache.hadoop.hdfs.client.BlockReportOptions.Factory"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="incremental" type="boolean"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.client.BlockReportOptions"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions.Factory -->
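<!-- Usage sketch for the builder above: constructing BlockReportOptions that request a
     full (non-incremental) block report. How the options are then delivered to a DataNode
     is outside this class and only referenced in a comment.

     import org.apache.hadoop.hdfs.client.BlockReportOptions;

     public class BlockReportOptionsExample {
       public static void main(String[] args) {
         BlockReportOptions options = new BlockReportOptions.Factory()
             .setIncremental(false)   // ask for a full block report
             .build();
         // The options object is handed to the trigger block report call; here we just print it.
         System.out.println(options);
       }
     }
-->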
<!-- start class org.apache.hadoop.hdfs.client.HdfsAdmin -->
<class name="HdfsAdmin" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsAdmin" type="java.net.URI, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new HdfsAdmin client.
@param uri the unique URI of the HDFS file system to administer
@param conf configuration
@throws IOException in the event the file system could not be created]]>
</doc>
</constructor>
<method name="setQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="quota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the namespace quota (count of files, directories, and sym links) for a
directory.
@param src the path to set the quota for
@param quota the value to set for the quota
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the namespace quota (count of files, directories and sym links) for a
directory.
@param src the path to clear the quota of
@throws IOException in the event of error]]>
</doc>
</method>
<method name="setSpaceQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="spaceQuota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the storage space quota (size of files) for a directory. Note that
directories and sym links do not occupy storage space.
@param src the path to set the space quota of
@param spaceQuota the value to set for the space quota
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearSpaceQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the storage space quota (size of files) for a directory. Note that
directories and sym links do not occupy storage space.
@param src the path to clear the space quota of
@throws IOException in the event of error]]>
</doc>
</method>
<method name="setQuotaByStorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
<param name="quota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the quota by storage type for a directory. Note that
directories and sym links do not occupy storage type quota.
@param src the target directory to set the quota by storage type
@param type the storage type to set for quota by storage type
@param quota the value to set for quota by storage type
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearQuotaByStorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the space quota by storage type for a directory. Note that
directories and sym links do not occupy storage type quota.
@param src the target directory to clear the quota by storage type
@param type the storage type to clear for quota by storage type
@throws IOException in the event of error]]>
</doc>
</method>
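<!-- Usage sketch for the quota methods above. The NameNode URI and paths are illustrative
     assumptions; note that the space quota is charged in raw bytes, so replicas count
     against it.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.fs.StorageType;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class QuotaAdminExample {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
         Path dir = new Path("/projects/reports");
         admin.setQuota(dir, 100000L);                        // at most 100,000 names under dir
         admin.setSpaceQuota(dir, 10L * 1024 * 1024 * 1024);  // 10 GB of raw space
         admin.setQuotaByStorageType(dir, StorageType.SSD, 1024L * 1024 * 1024);
         admin.clearQuotaByStorageType(dir, StorageType.SSD); // removes only the SSD quota
       }
     }
-->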
<method name="allowSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Allow snapshot on a directory.
@param path The path of the directory where snapshots will be taken.]]>
</doc>
</method>
<method name="disallowSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Disallow snapshot on a directory.
@param path The path of the snapshottable directory.]]>
</doc>
</method>
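<!-- Usage sketch for allowSnapshot/disallowSnapshot above. The URI and directory are
     illustrative; snapshots themselves are taken through the FileSystem API once the
     directory has been made snapshottable.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class SnapshotAdminExample {
       public static void main(String[] args) throws Exception {
         URI uri = URI.create("hdfs://localhost:8020");
         Configuration conf = new Configuration();
         HdfsAdmin admin = new HdfsAdmin(uri, conf);
         Path dir = new Path("/projects/reports");
         admin.allowSnapshot(dir);
         FileSystem.get(uri, conf).createSnapshot(dir, "before_cleanup");
         // disallowSnapshot(dir) only succeeds after all snapshots of dir have been deleted.
       }
     }
-->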
<method name="addCacheDirective" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<param name="flags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Add a new CacheDirectiveInfo.
@param info Information about a directive to add.
@param flags {@link CacheFlag}s to use for this operation.
@return the ID of the directive that was created.
@throws IOException if the directive could not be added]]>
</doc>
</method>
<method name="modifyCacheDirective"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<param name="flags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Modify a CacheDirective.
@param info Information about the directive to modify. You must set the ID
to indicate which CacheDirective you want to modify.
@param flags {@link CacheFlag}s to use for this operation.
@throws IOException if the directive could not be modified]]>
</doc>
</method>
<method name="removeCacheDirective"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Remove a CacheDirective.
@param id identifier of the CacheDirectiveInfo to remove
@throws IOException if the directive could not be removed]]>
</doc>
</method>
<method name="listCacheDirectives" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filter" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[List cache directives. Incrementally fetches results from the server.
@param filter Filter parameters to use when listing the directives, null to
list all directives visible to us.
@return A RemoteIterator which returns CacheDirectiveEntry objects, each
pairing a CacheDirectiveInfo with its current stats.]]>
</doc>
</method>
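<!-- Usage sketch for the cache directive methods above. The URI, path, and pool name are
     illustrative; the pool must already exist (see addCachePool below).

     import java.net.URI;
     import java.util.EnumSet;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.CacheFlag;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.fs.RemoteIterator;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
     import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

     public class CacheDirectiveExample {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
         CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
             .setPath(new Path("/datasets/hot_table"))
             .setPool("analytics")
             .setReplication((short) 2)
             .build();
         long id = admin.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
         RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
         while (it.hasNext()) {
           CacheDirectiveEntry entry = it.next();
           System.out.println(entry.getInfo() + " bytes cached: " + entry.getStats().getBytesCached());
         }
         admin.removeCacheDirective(id);
       }
     }
-->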
<method name="addCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Add a cache pool.
@param info
The request to add a cache pool.
@throws IOException
If the request could not be completed.]]>
</doc>
</method>
<method name="modifyCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Modify an existing cache pool.
@param info
The request to modify a cache pool.
@throws IOException
If the request could not be completed.]]>
</doc>
</method>
<method name="removeCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="poolName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Remove a cache pool.
@param poolName
Name of the cache pool to remove.
@throws IOException
if the cache pool did not exist, or could not be removed.]]>
</doc>
</method>
<method name="listCachePools" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[List all cache pools.
@return A remote iterator from which you can get CachePoolEntry objects.
Requests will be made as needed.
@throws IOException
If there was an error listing cache pools.]]>
</doc>
</method>
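<!-- Usage sketch for the cache pool methods above. The pool name, owner, group, mode and
     limit are illustrative assumptions.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.RemoteIterator;
     import org.apache.hadoop.fs.permission.FsPermission;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
     import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

     public class CachePoolExample {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
         CachePoolInfo pool = new CachePoolInfo("analytics")
             .setOwnerName("etl")
             .setGroupName("analytics")
             .setMode(new FsPermission((short) 0755))
             .setLimit(10L * 1024 * 1024 * 1024);   // cap cached data for this pool at 10 GB
         admin.addCachePool(pool);
         RemoteIterator<CachePoolEntry> pools = admin.listCachePools();
         while (pools.hasNext()) {
           System.out.println(pools.next().getInfo().getPoolName());
         }
       }
     }
-->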
<method name="createEncryptionZone"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="keyName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<doc>
<![CDATA[Create an encryption zone rooted at an empty existing directory, using the
specified encryption key. An encryption zone has an associated encryption
key used when reading and writing files within the zone.
@param path The path of the root of the encryption zone. Must refer to
an empty, existing directory.
@param keyName Name of key available at the KeyProvider.
@throws IOException if there was a general IO exception
@throws AccessControlException if the caller does not have access to path
@throws FileNotFoundException if the path does not exist]]>
</doc>
</method>
<method name="getEncryptionZoneForPath" return="org.apache.hadoop.hdfs.protocol.EncryptionZone"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<doc>
<![CDATA[Get the encryption zone for a given file or directory.
@param path The path to get the ez for.
@return The EncryptionZone of the ez, or null if path is not in an ez.
@throws IOException if there was a general IO exception
@throws AccessControlException if the caller does not have access to path
@throws FileNotFoundException if the path does not exist]]>
</doc>
</method>
<method name="listEncryptionZones" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns a RemoteIterator which can be used to list the encryption zones
in HDFS. For large numbers of encryption zones, the iterator will fetch
the list of zones in a number of small batches.
<p/>
Since the list is fetched in batches, it does not represent a
consistent snapshot of the entire list of encryption zones.
<p/>
This method can only be called by HDFS superusers.]]>
</doc>
</method>
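<!-- Usage sketch for the encryption zone methods above. The key name, paths and URI are
     illustrative; the key must already exist in the KeyProvider (e.g. KMS) configured for
     the cluster, and the zone root must be an empty, existing directory.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.fs.RemoteIterator;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.EncryptionZone;

     public class EncryptionZoneExample {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
         admin.createEncryptionZone(new Path("/secure/finance"), "finance_key");
         EncryptionZone zone = admin.getEncryptionZoneForPath(new Path("/secure/finance"));
         System.out.println("zone " + zone.getPath() + " uses key " + zone.getKeyName());
         RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();  // superusers only
         while (zones.hasNext()) {
           System.out.println(zones.next().getPath());
         }
       }
     }
-->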
<method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Exposes a stream of namesystem events. Only events occurring after the
stream is created are available.
See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
for information on stream usage.
See {@link org.apache.hadoop.hdfs.inotify.Event}
for information on the available events.
<p/>
Inotify users may want to tune the following HDFS parameters to
ensure that enough extra HDFS edits are saved to support inotify clients
that fall behind the current state of the namespace while reading events.
The default parameter values should generally be reasonable. If edits are
deleted before their corresponding events can be read, clients will see a
{@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
{@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
It should generally be sufficient to tune these parameters:
dfs.namenode.num.extra.edits.retained
dfs.namenode.max.extra.edits.segments.retained
Parameters that affect the number of segments that are created and the
number of edits that are considered necessary (i.e. that do not count
towards the dfs.namenode.num.extra.edits.retained quota):
dfs.namenode.checkpoint.period
dfs.namenode.checkpoint.txns
dfs.namenode.num.checkpoints.retained
dfs.ha.log-roll.period
<p/>
It is recommended that local journaling be configured
(dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
so that edit transfers from the shared journal can be avoided.
@throws IOException If there was an error obtaining the stream.]]>
</doc>
</method>
<method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="lastReadTxid" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
users who are aware of HDFS edits up to lastReadTxid (e.g. because they
have access to an FSImage inclusive of lastReadTxid) and only want to read
events after this point.]]>
</doc>
</method>
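<!-- Usage sketch for getInotifyEventStream above: tailing a few event batches. Reading
     the inotify stream requires HDFS superuser privileges; the URI is an illustrative
     assumption.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.inotify.EventBatch;

     public class InotifyTailExample {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
         DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
         for (int i = 0; i < 10; i++) {
           EventBatch batch = stream.take();   // blocks until the next batch is available
           System.out.println("txid " + batch.getTxid() + ": " + batch.getEvents().length + " event(s)");
         }
         // To resume later from a known transaction, use getInotifyEventStream(lastReadTxid).
       }
     }
-->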
<method name="setStoragePolicy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="policyName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the specified storage policy on the source path.
@param src The source path referring to either a directory or a file.
@param policyName The name of the storage policy.]]>
</doc>
</method>
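<!-- Usage sketch for setStoragePolicy above. The path is illustrative; "COLD" is one of
     the built-in policy names, and already written blocks are only relocated later by the
     mover tool.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class StoragePolicyExample {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
         admin.setStoragePolicy(new Path("/archive/2015"), "COLD");
       }
     }
-->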
<doc>
<![CDATA[The public API for performing administrative functions on HDFS. Those writing
applications against HDFS should prefer this interface to directly accessing
functionality in DistributedFileSystem or DFSClient.
Note that this is distinct from the similarly-named {@link DFSAdmin}, which
is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
commands.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsAdmin -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
<class name="HdfsDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataInputStream" type="org.apache.hadoop.crypto.CryptoInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getWrappedStream" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get a reference to the wrapped input stream. We always want to return the
actual underlying InputStream, even when we're using a CryptoStream, e.g.
in the delegated methods below.
@return the underlying input stream]]>
</doc>
</method>
<method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the datanode from which the stream is currently reading.]]>
</doc>
</method>
<method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the block containing the target position.]]>
</doc>
</method>
<method name="getAllBlocks" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the collection of blocks that have already been located.]]>
</doc>
</method>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the visible length of the file. It will include the length of the last
block even if that is in UnderConstruction state.
@return The visible length of the file.]]>
</doc>
</method>
<method name="getReadStatistics" return="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get statistics about the reads which this DFSInputStream has done.
Note that because HdfsDataInputStream is buffered, these stats may
be higher than you would expect just by adding up the number of
bytes read through HdfsDataInputStream.]]>
</doc>
</method>
<method name="clearReadStatistics"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The Hdfs implementation of {@link FSDataInputStream}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
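<!-- Usage sketch for HdfsDataInputStream.getReadStatistics above. The URI and file path
     are illustrative, and the ReadStatistics getter names used here (getTotalBytesRead,
     getTotalLocalBytesRead, getTotalShortCircuitBytesRead) are assumed from the
     DFSInputStream.ReadStatistics class rather than documented in this report.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FSDataInputStream;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.DFSInputStream;
     import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

     public class ReadStatisticsExample {
       public static void main(String[] args) throws Exception {
         FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
         FSDataInputStream in = fs.open(new Path("/datasets/hot_table/part_00000"));
         byte[] buffer = new byte[8192];
         while (in.read(buffer) != -1) {
           // drain the file so the statistics have something to report
         }
         if (in instanceof HdfsDataInputStream) {
           DFSInputStream.ReadStatistics stats = ((HdfsDataInputStream) in).getReadStatistics();
           System.out.println("total bytes read: " + stats.getTotalBytesRead());
           System.out.println("local bytes read: " + stats.getTotalLocalBytesRead());
           System.out.println("short-circuit bytes read: " + stats.getTotalShortCircuitBytesRead());
         }
         in.close();
       }
     }
-->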
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
<class name="HdfsDataOutputStream" extends="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getCurrentBlockReplication" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the actual number of replicas of the current block.
This can be different from the designated replication factor of the file
because the namenode does not maintain replication for the blocks which are
currently being written to. Depending on the configuration, the client may
continue to write to a block even if a few datanodes in the write pipeline
have failed, or the client may add new datanodes once a datanode has
failed.
@return the number of valid replicas of the current block]]>
</doc>
</method>
<method name="hsync"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="syncFlags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Sync buffered data to DataNodes (flush to disk devices).
@param syncFlags
Indicate the detailed semantic and actions of the hsync.
@throws IOException
@see FSDataOutputStream#hsync()]]>
</doc>
</method>
<doc>
<![CDATA[The Hdfs implementation of {@link FSDataOutputStream}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag -->
<class name="HdfsDataOutputStream.SyncFlag" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag -->
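<!-- Usage sketch for HdfsDataOutputStream.hsync and SyncFlag above. Against HDFS,
     FileSystem.create returns an HdfsDataOutputStream, so the cast below is expected to
     hold; the URI and path are illustrative assumptions.

     import java.net.URI;
     import java.nio.charset.StandardCharsets;
     import java.util.EnumSet;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
     import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

     public class HsyncExample {
       public static void main(String[] args) throws Exception {
         FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
         HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(new Path("/logs/app.log"));
         out.write("record 1\n".getBytes(StandardCharsets.UTF_8));
         // Flush to the DataNode disks and have the NameNode update the visible file length.
         out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
         System.out.println("replicas of current block: " + out.getCurrentBlockReplication());
         out.close();
       }
     }
-->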
<!-- start class org.apache.hadoop.hdfs.client.HdfsUtils -->
<class name="HdfsUtils" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsUtils"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isHealthy" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<doc>
<![CDATA[Is the HDFS healthy?
HDFS is considered healthy if it is up and not in safemode.
@param uri the HDFS URI. Note that the URI path is ignored.
@return true if HDFS is healthy; false, otherwise.]]>
</doc>
</method>
<doc>
<![CDATA[The public utility API for HDFS.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsUtils -->
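<!-- Usage sketch for HdfsUtils.isHealthy above; the URI is an illustrative assumption and
     its path component is ignored.

     import java.net.URI;
     import org.apache.hadoop.hdfs.client.HdfsUtils;

     public class HealthCheckExample {
       public static void main(String[] args) {
         boolean healthy = HdfsUtils.isHealthy(URI.create("hdfs://localhost:8020/"));
         System.out.println("HDFS healthy: " + healthy);
       }
     }
-->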
</package>
<package name="org.apache.hadoop.hdfs.inotify">
<!-- start class org.apache.hadoop.hdfs.inotify.Event -->
<class name="Event" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event" type="org.apache.hadoop.hdfs.inotify.Event.EventType"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getEventType" return="org.apache.hadoop.hdfs.inotify.Event.EventType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Events sent by the inotify system. Note that no events are necessarily sent
when a file is opened for read (although a MetadataUpdateEvent will be sent
if the atime is updated).]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.AppendEvent -->
<class name="Event.AppendEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toNewBlock" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Sent when an existing file is opened for append.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.AppendEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.AppendEvent.Builder -->
<class name="Event.AppendEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="path" return="org.apache.hadoop.hdfs.inotify.Event.AppendEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="newBlock" return="org.apache.hadoop.hdfs.inotify.Event.AppendEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="newBlock" type="boolean"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.AppendEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.AppendEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CloseEvent -->
<class name="Event.CloseEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CloseEvent" type="java.lang.String, long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFileSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The size of the closed file in bytes. May be -1 if the size is not
available (e.g. in the case of a close generated by a concat operation).]]>
</doc>
</method>
<method name="getTimestamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this event occurred, in milliseconds since the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[Sent when a file is closed after append or create.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CloseEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CreateEvent -->
<class name="Event.CreateEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getiNodeType" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getCtime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Creation time of the file, directory, or symlink.]]>
</doc>
</method>
<method name="getReplication" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Replication is zero if the CreateEvent iNodeType is directory or symlink.]]>
</doc>
</method>
<method name="getOwnerName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPerms" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSymlinkTarget" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Symlink target is null if the CreateEvent iNodeType is not symlink.]]>
</doc>
</method>
<method name="getOverwrite" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDefaultBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Sent when a new file is created (including overwrite).]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CreateEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder -->
<class name="Event.CreateEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="iNodeType" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType"/>
</method>
<method name="path" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="ctime" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ctime" type="long"/>
</method>
<method name="replication" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="replication" type="int"/>
</method>
<method name="ownerName" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ownerName" type="java.lang.String"/>
</method>
<method name="groupName" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="groupName" type="java.lang.String"/>
</method>
<method name="perms" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="perms" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="symlinkTarget" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="symlinkTarget" type="java.lang.String"/>
</method>
<method name="overwrite" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="overwrite" type="boolean"/>
</method>
<method name="defaultBlockSize" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="defaultBlockSize" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType -->
<class name="Event.CreateEvent.INodeType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.EventType -->
<class name="Event.EventType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.inotify.Event.EventType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.inotify.Event.EventType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.EventType -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent -->
<class name="Event.MetadataUpdateEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getMetadataType" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getMtime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAtime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getReplication" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getOwnerName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPerms" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAcls" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The full set of ACLs currently associated with this file or directory.
May be null if all ACLs were removed.]]>
</doc>
</method>
<method name="getxAttrs" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isxAttrsRemoved" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Whether the xAttrs returned by getxAttrs() were removed (as opposed to
added).]]>
</doc>
</method>
<doc>
<![CDATA[Sent when there is an update to a directory or file (none of the metadata
tracked here applies to symlinks) that is not associated with another
inotify event. The tracked metadata includes atime/mtime, replication,
owner/group, permissions, ACLs, and XAttributes. Fields not relevant to the
metadataType of the MetadataUpdateEvent will be null or will have their default
values.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder -->
<class name="Event.MetadataUpdateEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="path" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="metadataType" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType"/>
</method>
<method name="mtime" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="mtime" type="long"/>
</method>
<method name="atime" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="atime" type="long"/>
</method>
<method name="replication" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="replication" type="int"/>
</method>
<method name="ownerName" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ownerName" type="java.lang.String"/>
</method>
<method name="groupName" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="groupName" type="java.lang.String"/>
</method>
<method name="perms" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="perms" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="acls" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="acls" type="java.util.List"/>
</method>
<method name="xAttrs" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrs" type="java.util.List"/>
</method>
<method name="xAttrsRemoved" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrsRemoved" type="boolean"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType -->
<class name="Event.MetadataUpdateEvent.MetadataType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.RenameEvent -->
<class name="Event.RenameEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getSrcPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDstPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTimestamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this event occurred, in milliseconds since the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[Sent when a file, directory, or symlink is renamed.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.RenameEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.RenameEvent.Builder -->
<class name="Event.RenameEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="srcPath" return="org.apache.hadoop.hdfs.inotify.Event.RenameEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="srcPath" type="java.lang.String"/>
</method>
<method name="dstPath" return="org.apache.hadoop.hdfs.inotify.Event.RenameEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dstPath" type="java.lang.String"/>
</method>
<method name="timestamp" return="org.apache.hadoop.hdfs.inotify.Event.RenameEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="timestamp" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.RenameEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.RenameEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent -->
<class name="Event.UnlinkEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTimestamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this event occurred, in milliseconds since the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[Sent when a file, directory, or symlink is deleted.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent.Builder -->
<class name="Event.UnlinkEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="path" return="org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="timestamp" return="org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="timestamp" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.EventBatch -->
<class name="EventBatch" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EventBatch" type="long, org.apache.hadoop.hdfs.inotify.Event[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getEvents" return="org.apache.hadoop.hdfs.inotify.Event[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A batch of events that all happened on the same transaction ID.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.EventBatch -->
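<!-- Usage sketch: dispatching on the event types in an EventBatch (obtained, for example,
     from DFSInotifyEventInputStream.take()). Only a few event types are handled here; the
     method and message strings are illustrative.

     import org.apache.hadoop.hdfs.inotify.Event;
     import org.apache.hadoop.hdfs.inotify.EventBatch;

     public class EventBatchDispatcher {
       /** Prints a line for some of the event types carried by the batch. */
       public static void dispatch(EventBatch batch) {
         System.out.println("batch txid " + batch.getTxid());
         for (Event event : batch.getEvents()) {
           switch (event.getEventType()) {
             case CREATE:
               Event.CreateEvent create = (Event.CreateEvent) event;
               System.out.println("create " + create.getPath());
               break;
             case RENAME:
               Event.RenameEvent rename = (Event.RenameEvent) event;
               System.out.println("rename " + rename.getSrcPath() + " to " + rename.getDstPath());
               break;
             case UNLINK:
               Event.UnlinkEvent unlink = (Event.UnlinkEvent) event;
               System.out.println("delete " + unlink.getPath());
               break;
             default:
               // CLOSE, APPEND and METADATA events would be handled the same way.
               break;
           }
         }
       }
     }
-->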
<!-- start class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
<class name="MissingEventsException" extends="java.lang.Exception"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="MissingEventsException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="MissingEventsException" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getExpectedTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getActualTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
</package>
<package name="org.apache.hadoop.hdfs.net">
</package>
<package name="org.apache.hadoop.hdfs.protocol">
<!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.Builder -->
<class name="BlockListAsLongs.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="add"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="replica" type="org.apache.hadoop.hdfs.server.datanode.Replica"/>
</method>
<method name="getNumberOfBlocks" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="build" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.Builder -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry -->
<class name="CacheDirectiveEntry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CacheDirectiveEntry" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo, org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStats" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes a path-based cache directive entry.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo -->
<class name="CacheDirectiveInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getId" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The ID of this directive.]]>
</doc>
</method>
<method name="getPath" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The path used in this request.]]>
</doc>
</method>
<method name="getReplication" return="java.lang.Short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of times the block should be cached.]]>
</doc>
</method>
<method name="getPool" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The pool used in this request.]]>
</doc>
</method>
<method name="getExpiration" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return When this directive expires.]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes a path-based cache directive.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder -->
<class name="CacheDirectiveInfo.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Creates an empty builder.]]>
</doc>
</constructor>
<constructor name="Builder" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Creates a builder with all elements set to the same values as the
given CacheDirectiveInfo.]]>
</doc>
</constructor>
<method name="build" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Builds a new CacheDirectiveInfo populated with the set properties.
@return New CacheDirectiveInfo.]]>
</doc>
</method>
<method name="setId" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="java.lang.Long"/>
<doc>
<![CDATA[Sets the id used in this request.
@param id The id used in this request.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setPath" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<doc>
<![CDATA[Sets the path used in this request.
@param path The path used in this request.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setReplication" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="replication" type="java.lang.Short"/>
<doc>
<![CDATA[Sets the replication used in this request.
@param replication The replication used in this request.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setPool" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pool" type="java.lang.String"/>
<doc>
<![CDATA[Sets the pool used in this request.
@param pool The pool used in this request.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setExpiration" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="expiration" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"/>
<doc>
<![CDATA[Sets when the CacheDirective should expire. A
{@link CacheDirectiveInfo.Expiration} can specify either an absolute or
relative expiration time.
@param expiration when this CacheDirective should expire
@return This builder, for call chaining]]>
</doc>
</method>
<doc>
<![CDATA[A builder for creating new CacheDirectiveInfo instances.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Builder -->
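  <!-- Illustrative usage sketch for CacheDirectiveInfo and its Builder, using only the
       methods documented above. The path "/datasets/hot" and pool name "research" are
       hypothetical values.

       import org.apache.hadoop.fs.Path;
       import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

       public class CacheDirectiveInfoExample {
         public static void main(String[] args) {
           CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
               .setPath(new Path("/datasets/hot"))
               .setReplication((short) 2)          // cache two replicas of each block
               .setPool("research")
               .setExpiration(
                   CacheDirectiveInfo.Expiration.newRelative(24L * 60 * 60 * 1000))
               .build();
           System.out.println(directive);          // uses the documented toString()
         }
       }
  -->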
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration -->
<class name="CacheDirectiveInfo.Expiration" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="newRelative" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="ms" type="long"/>
<doc>
<![CDATA[Create a new relative Expiration.
<p>
Use {@link Expiration#NEVER} to indicate an Expiration that never
expires.
@param ms how long until the CacheDirective expires, in milliseconds
@return A relative Expiration]]>
</doc>
</method>
<method name="newAbsolute" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="date" type="java.util.Date"/>
<doc>
<![CDATA[Create a new absolute Expiration.
<p>
Use {@link Expiration#NEVER} to indicate an Expiration that never
expires.
@param date when the CacheDirective expires
@return An absolute Expiration]]>
</doc>
</method>
<method name="newAbsolute" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="ms" type="long"/>
<doc>
<![CDATA[Create a new absolute Expiration.
<p>
Use {@link Expiration#NEVER} to indicate an Expiration that never
expires.
@param ms when the CacheDirective expires, in milliseconds since the Unix
epoch.
@return An absolute Expiration]]>
</doc>
</method>
<method name="isRelative" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true if Expiration was specified as a relative duration, false if
specified as an absolute time.]]>
</doc>
</method>
<method name="getMillis" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The raw underlying millisecond value, either a relative duration
or an absolute time as milliseconds since the Unix epoch.]]>
</doc>
</method>
<method name="getAbsoluteDate" return="java.util.Date"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Expiration time as a {@link Date} object. This converts a
relative Expiration into an absolute Date based on the local
clock.]]>
</doc>
</method>
<method name="getAbsoluteMillis" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Expiration time in milliseconds from the Unix epoch. This
converts a relative Expiration into an absolute time based on the
local clock.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="MAX_RELATIVE_EXPIRY_MS" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The maximum value we accept for a relative expiry.]]>
</doc>
</field>
<field name="NEVER" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[A relative Expiration that never expires.]]>
</doc>
</field>
<doc>
<![CDATA[Denotes a relative or absolute expiration time for a CacheDirective. Use
factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} and
{@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
Expiration.
<p>
In either case, the server-side clock is used to determine when a
CacheDirective expires.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration -->
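  <!-- Illustrative sketch of the Expiration factory methods documented above; the
       duration and date values are arbitrary examples.

       import java.util.Date;
       import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;

       public class ExpirationExample {
         public static void main(String[] args) {
           // Relative: expires one hour after the server applies the directive.
           Expiration relative = Expiration.newRelative(60L * 60 * 1000);
           // Absolute: expires at a fixed instant, in milliseconds since the Unix epoch.
           Expiration absolute =
               Expiration.newAbsolute(new Date(System.currentTimeMillis() + 60L * 60 * 1000));
           System.out.println(relative.isRelative());       // true
           System.out.println(absolute.getAbsoluteMillis());
           System.out.println(Expiration.NEVER);            // never expires
         }
       }
  -->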
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveStats -->
<class name="CacheDirectiveStats" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getBytesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The bytes needed.]]>
</doc>
</method>
<method name="getBytesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The bytes cached.]]>
</doc>
</method>
<method name="getFilesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of files needed.]]>
</doc>
</method>
<method name="getFilesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of files cached.]]>
</doc>
</method>
<method name="hasExpired" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Whether this directive has expired.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
      <![CDATA[Describes the statistics of a path-based cache directive.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveStats -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder -->
<class name="CacheDirectiveStats.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Creates an empty builder.]]>
</doc>
</constructor>
<method name="build" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Builds a new CacheDirectiveStats populated with the set properties.
@return New CacheDirectiveStats.]]>
</doc>
</method>
<method name="setBytesNeeded" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesNeeded" type="long"/>
<doc>
<![CDATA[Sets the bytes needed by this directive.
@param bytesNeeded The bytes needed.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setBytesCached" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesCached" type="long"/>
<doc>
<![CDATA[Sets the bytes cached by this directive.
@param bytesCached The bytes cached.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setFilesNeeded" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filesNeeded" type="long"/>
<doc>
<![CDATA[Sets the files needed by this directive.
@param filesNeeded The number of files needed
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setFilesCached" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filesCached" type="long"/>
<doc>
<![CDATA[Sets the files cached by this directive.
@param filesCached The number of files cached.
@return This builder, for call chaining.]]>
</doc>
</method>
<method name="setHasExpired" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="hasExpired" type="boolean"/>
<doc>
<![CDATA[Sets whether this directive has expired.
@param hasExpired if this directive has expired
@return This builder, for call chaining.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveStats.Builder -->
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolEntry -->
<class name="CachePoolEntry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CachePoolEntry" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo, org.apache.hadoop.hdfs.protocol.CachePoolStats"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStats" return="org.apache.hadoop.hdfs.protocol.CachePoolStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
      <![CDATA[Describes a cache pool entry.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolEntry -->
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolInfo -->
<class name="CachePoolInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CachePoolInfo" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPoolName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Name of the pool.]]>
</doc>
</method>
<method name="getOwnerName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The owner of the pool. Along with the group and mode, determines
who has access to view and modify the pool.]]>
</doc>
</method>
<method name="setOwnerName" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ownerName" type="java.lang.String"/>
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The group of the pool. Along with the owner and mode, determines
who has access to view and modify the pool.]]>
</doc>
</method>
<method name="setGroupName" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="groupName" type="java.lang.String"/>
</method>
<method name="getMode" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Unix-style permissions of the pool. Along with the owner and group,
determines who has access to view and modify the pool.]]>
</doc>
</method>
<method name="setMode" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="mode" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="getLimit" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The maximum aggregate number of bytes that can be cached by
directives in this pool.]]>
</doc>
</method>
<method name="setLimit" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytes" type="java.lang.Long"/>
</method>
<method name="getMaxRelativeExpiryMs" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The maximum relative expiration of directives of this pool in
milliseconds]]>
</doc>
</method>
<method name="setMaxRelativeExpiryMs" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ms" type="java.lang.Long"/>
<doc>
<![CDATA[Set the maximum relative expiration of directives of this pool in
milliseconds.
      @param ms the maximum relative expiration, in milliseconds
      @return This CachePoolInfo, for call chaining.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="validate"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="validateName"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="poolName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="RELATIVE_EXPIRY_NEVER" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Indicates that the pool does not have a maximum relative expiry.]]>
</doc>
</field>
<field name="DEFAULT_MAX_RELATIVE_EXPIRY" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default max relative expiry for cache pools.]]>
</doc>
</field>
<field name="LIMIT_UNLIMITED" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DEFAULT_LIMIT" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[CachePoolInfo describes a cache pool.
This class is used in RPCs to create and modify cache pools.
It is serializable and can be stored in the edit log.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolInfo -->
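  <!-- Illustrative sketch of describing a cache pool with the chained CachePoolInfo
       setters documented above. The pool name, owner, group, mode and limit are
       hypothetical values.

       import java.io.IOException;
       import org.apache.hadoop.fs.permission.FsPermission;
       import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

       public class CachePoolInfoExample {
         public static void main(String[] args) throws IOException {
           CachePoolInfo pool = new CachePoolInfo("research")
               .setOwnerName("alice")
               .setGroupName("analysts")
               .setMode(new FsPermission((short) 0755))
               .setLimit(10L * 1024 * 1024 * 1024)          // 10 GiB aggregate limit
               .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER);
           CachePoolInfo.validate(pool);                    // throws IOException if invalid
           System.out.println(pool);
         }
       }
  -->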
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolStats -->
<class name="CachePoolStats" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getBytesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesOverlimit" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFilesNeeded" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFilesCached" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[CachePoolStats describes cache pool statistics.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolStats -->
<!-- start class org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder -->
<class name="CachePoolStats.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setBytesNeeded" return="org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesNeeded" type="long"/>
</method>
<method name="setBytesCached" return="org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesCached" type="long"/>
</method>
<method name="setBytesOverlimit" return="org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesOverlimit" type="long"/>
</method>
<method name="setFilesNeeded" return="org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filesNeeded" type="long"/>
</method>
<method name="setFilesCached" return="org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filesCached" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.protocol.CachePoolStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CachePoolStats.Builder -->
<!-- start class org.apache.hadoop.hdfs.protocol.CorruptFileBlocks -->
<class name="CorruptFileBlocks" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CorruptFileBlocks"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="CorruptFileBlocks" type="java.lang.String[], java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getFiles" return="java.lang.String[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getCookie" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Contains a list of paths corresponding to corrupt files and a cookie
used for iterative calls to NameNode.listCorruptFileBlocks.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CorruptFileBlocks -->
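  <!-- Illustrative sketch of the shape of a CorruptFileBlocks response. In practice
       instances are returned by the NameNode while a client iterates over
       listCorruptFileBlocks; the file names and cookie below are hypothetical.

       import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;

       public class CorruptFileBlocksExample {
         public static void main(String[] args) {
           CorruptFileBlocks batch = new CorruptFileBlocks(
               new String[] {"/logs/app.log", "/tmp/part_00042"}, "cookie1");
           for (String file : batch.getFiles()) {
             System.out.println("corrupt: " + file);
           }
           // Pass the cookie back on the next call to continue where this batch ended.
           System.out.println("next cookie: " + batch.getCookie());
         }
       }
  -->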
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
<class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="fromValue" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="value" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
<!-- start class org.apache.hadoop.hdfs.protocol.EncryptionZone -->
<class name="EncryptionZone" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EncryptionZone" type="long, java.lang.String, org.apache.hadoop.crypto.CipherSuite, org.apache.hadoop.crypto.CryptoProtocolVersion, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSuite" return="org.apache.hadoop.crypto.CipherSuite"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getVersion" return="org.apache.hadoop.crypto.CryptoProtocolVersion"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getKeyName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A simple class for representing an encryption zone. Presently an encryption
zone only has a path (the root of the encryption zone), a key name, and a
unique id. The id is used to implement batched listing of encryption zones.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.EncryptionZone -->
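  <!-- Illustrative sketch that reads the EncryptionZone fields documented above.
       HdfsAdmin#listEncryptionZones is assumed from the HDFS client API rather than
       this listing, and the namenode URI is a hypothetical placeholder.

       import java.net.URI;
       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.fs.RemoteIterator;
       import org.apache.hadoop.hdfs.client.HdfsAdmin;
       import org.apache.hadoop.hdfs.protocol.EncryptionZone;

       public class ListEncryptionZones {
         public static void main(String[] args) throws Exception {
           HdfsAdmin admin =
               new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
           RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
           while (zones.hasNext()) {
             EncryptionZone zone = zones.next();
             System.out.println(zone.getId() + " " + zone.getPath()
                 + " key=" + zone.getKeyName());
           }
         }
       }
  -->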
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants -->
<class name="FSConstants" extends="org.apache.hadoop.hdfs.protocol.HdfsConstants"
abstract="true"
static="false" final="false" visibility="public"
deprecated="Please use {@link HdfsConstants}. This class
is left only for other ecosystem projects which depended on
it for SafemodeAction and DatanodeReport types.">
<constructor name="FSConstants"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[@deprecated Please use {@link HdfsConstants}. This class
is left only for other ecosystem projects which depended on
it for SafemodeAction and DatanodeReport types.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants -->
<!-- start class org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException -->
<class name="FSLimitException.MaxDirectoryItemsExceededException" extends="org.apache.hadoop.hdfs.protocol.FSLimitException"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="MaxDirectoryItemsExceededException"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<constructor name="MaxDirectoryItemsExceededException" type="java.lang.String"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<constructor name="MaxDirectoryItemsExceededException" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getMessage" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="serialVersionUID" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Directory has too many items]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException -->
<!-- start class org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException -->
<class name="FSLimitException.PathComponentTooLongException" extends="org.apache.hadoop.hdfs.protocol.FSLimitException"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="PathComponentTooLongException"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<constructor name="PathComponentTooLongException" type="java.lang.String"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<constructor name="PathComponentTooLongException" type="long, long, java.lang.String, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getMessage" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="serialVersionUID" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Path component length is too long]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException -->
<!-- start class org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType -->
<class name="HdfsConstants.DatanodeReportType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType -->
<!-- start class org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction -->
<class name="HdfsConstants.RollingUpgradeAction" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="fromString" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="java.lang.String"/>
<doc>
      <![CDATA[Convert the given String to a RollingUpgradeAction.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction -->
<!-- start class org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction -->
<class name="HdfsConstants.SafeModeAction" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction -->
<!-- start class org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature -->
<class name="LayoutVersion.Feature" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature"/>
<method name="values" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Enums for features that change the layout version before rolling
upgrade is supported.
<br><br>
To add a new layout version:
<ul>
<li>Define a new enum constant with a short enum name, the new layout version
and description of the added feature.</li>
      <li>When adding a layout version with an ancestor that is not the same as
its immediate predecessor, use the constructor where a specific ancestor
can be passed.
</li>
</ul>]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature -->
<!-- start class org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo -->
<class name="LayoutVersion.FeatureInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FeatureInfo" type="int, int, java.lang.String, boolean, org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getLayoutVersion" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Accessor method for the feature's layout version.
      @return the layout version value as an int]]>
</doc>
</method>
<method name="getAncestorLayoutVersion" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Accessor method for the feature's ancestor layout version.
      @return the ancestor layout version value as an int]]>
</doc>
</method>
<method name="getDescription" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Accessor method for feature description
@return String feature description]]>
</doc>
</method>
<method name="isReservedForOldRelease" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSpecialFeatures" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Feature information.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo -->
<!-- start interface org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature -->
<interface name="LayoutVersion.LayoutFeature" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The interface to be implemented by NameNode and DataNode layout features]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature -->
<!-- start class org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo.Bean -->
<class name="RollingUpgradeInfo.Bean" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Bean" type="org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getBlockPoolId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStartTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFinalizeTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isCreatedRollbackImages" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo.Bean -->
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException -->
<class name="SnapshotAccessControlException" extends="org.apache.hadoop.security.AccessControlException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshotAccessControlException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="SnapshotAccessControlException" type="java.lang.Throwable"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Snapshot access related exception.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException -->
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshotDiffReport -->
<class name="SnapshotDiffReport" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshotDiffReport" type="java.lang.String, java.lang.String, java.lang.String, java.util.List"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getSnapshotRoot" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return {@link #snapshotRoot}]]>
</doc>
</method>
<method name="getFromSnapshot" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return {@link #fromSnapshot}]]>
</doc>
</method>
<method name="getLaterSnapshotName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return {@link #toSnapshot}]]>
</doc>
</method>
<method name="getDiffList" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return {@link #diffList}]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class represents to end users the difference between two snapshots of
the same directory, or the difference between a snapshot of the directory and
its current state. Instead of capturing all the details of the diff, this
class only lists where the changes happened and their types.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshotDiffReport -->
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry -->
<class name="SnapshotDiffReport.DiffReportEntry" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DiffReportEntry" type="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType, byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DiffReportEntry" type="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType, byte[][]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DiffReportEntry" type="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType, byte[], byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DiffReportEntry" type="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType, byte[][], byte[][]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getType" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSourcePath" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTargetPath" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="other" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Representing the full path and diff type of a file/directory where changes
have happened.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry -->
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType -->
<class name="SnapshotDiffReport.DiffType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getLabel" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTypeFromLabel" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="label" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Types of the difference, which include CREATE, MODIFY, DELETE, and RENAME.
Each type has a label for representation: +/M/-/R represent CREATE, MODIFY,
DELETE, and RENAME respectively.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType -->
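  <!-- Illustrative sketch tying together SnapshotDiffReport, DiffReportEntry and
       DiffType above. DistributedFileSystem#getSnapshotDiffReport is assumed from the
       HDFS client API rather than this listing; the directory path and snapshot names
       are hypothetical.

       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.fs.FileSystem;
       import org.apache.hadoop.fs.Path;
       import org.apache.hadoop.hdfs.DistributedFileSystem;
       import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
       import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;

       public class SnapshotDiffExample {
         public static void main(String[] args) throws Exception {
           DistributedFileSystem dfs =
               (DistributedFileSystem) FileSystem.get(new Configuration());
           SnapshotDiffReport report =
               dfs.getSnapshotDiffReport(new Path("/projects"), "s1", "s2");
           for (DiffReportEntry entry : report.getDiffList()) {
             // getType() is CREATE, MODIFY, DELETE or RENAME; paths are byte arrays.
             System.out.println(entry.getType().getLabel() + " "
                 + new String(entry.getSourcePath(), "UTF-8"));
           }
         }
       }
  -->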
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshotException -->
<class name="SnapshotException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshotException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="SnapshotException" type="java.lang.Throwable"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Snapshot related exception.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshotException -->
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshotInfo.Bean -->
<class name="SnapshotInfo.Bean" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Bean" type="java.lang.String, java.lang.String, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getSnapshotID" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshotDirectory" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getModificationTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshotInfo.Bean -->
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus -->
<class name="SnapshottableDirectoryStatus" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshottableDirectoryStatus" type="long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], long, int, int, int, byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getSnapshotNumber" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Number of snapshots that have been taken for the directory]]>
</doc>
</method>
<method name="getSnapshotQuota" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Number of snapshots allowed for the directory]]>
</doc>
</method>
<method name="getParentFullPath" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Full path of the parent]]>
</doc>
</method>
<method name="getDirStatus" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The basic information of the directory]]>
</doc>
</method>
<method name="getFullPath" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return Full path of the file]]>
</doc>
</method>
<method name="print"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="stats" type="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[]"/>
<param name="out" type="java.io.PrintStream"/>
<doc>
<![CDATA[Print a list of {@link SnapshottableDirectoryStatus} out to a given stream.
@param stats The list of {@link SnapshottableDirectoryStatus}
@param out The given stream for printing.]]>
</doc>
</method>
<field name="COMPARATOR" type="java.util.Comparator"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Compare the statuses by full paths.]]>
</doc>
</field>
<doc>
<![CDATA[Metadata about a snapshottable directory]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus -->
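  <!-- Illustrative sketch: printing snapshottable directories with the static print
       helper documented above. DistributedFileSystem#getSnapshottableDirListing is
       assumed from the HDFS client API rather than this listing.

       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.fs.FileSystem;
       import org.apache.hadoop.hdfs.DistributedFileSystem;
       import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

       public class ListSnapshottableDirs {
         public static void main(String[] args) throws Exception {
           DistributedFileSystem dfs =
               (DistributedFileSystem) FileSystem.get(new Configuration());
           SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
           if (dirs != null) {                    // null when there are no such directories
             SnapshottableDirectoryStatus.print(dirs, System.out);
           }
         }
       }
  -->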
<!-- start class org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus.Bean -->
<class name="SnapshottableDirectoryStatus.Bean" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Bean" type="java.lang.String, int, int, long, short, java.lang.String, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshotNumber" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshotQuota" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getModificationTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPermission" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getOwner" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getGroup" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus.Bean -->
</package>
<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
<!-- start class org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN -->
<class name="PipelineAck.ECN" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getValue" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN -->
<!-- start class org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy -->
<class name="ReplaceDatanodeOnFailure.Policy" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[The replacement policies]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy -->
<!-- start class org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver -->
<class name="TrustedChannelResolver" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.conf.Configurable"/>
<constructor name="TrustedChannelResolver"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInstance" return="org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Returns an instance of TrustedChannelResolver.
 Looks up the configuration to see if a custom class is specified.
 @param conf the configuration used to look up the resolver class
 @return the configured TrustedChannelResolver instance]]>
</doc>
</method>
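<!-- A minimal sketch of obtaining and querying a resolver through getInstance().
Whether a custom resolver class is configured depends on the environment, so the
calls below may simply exercise the default (never trusted) implementation; the
peer address 10.0.0.12 is an illustrative value.

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

public class TrustedChannelCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    TrustedChannelResolver resolver = TrustedChannelResolver.getInstance(conf);
    // Client-side view of the local channel, with no peer address available.
    System.out.println("local channel trusted: " + resolver.isTrusted());
    // View of a channel to a specific peer.
    InetAddress peer = InetAddress.getByName("10.0.0.12");
    System.out.println("peer trusted: " + resolver.isTrusted(peer));
  }
}
-->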
<method name="setConf"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
</method>
<method name="getConf" return="org.apache.hadoop.conf.Configuration"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isTrusted" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return a boolean value indicating whether the channel is trusted or not
 from a client's perspective.
 @return true if the channel is trusted and false otherwise.]]>
</doc>
</method>
<method name="isTrusted" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="peerAddress" type="java.net.InetAddress"/>
<doc>
<![CDATA[Return a boolean value indicating whether the channel to the given peer is trusted or not.
 @param peerAddress address of the peer
 @return true if the channel is trusted and false otherwise.]]>
</doc>
</method>
<doc>
<![CDATA[Class used to determine whether a channel is trusted or not.
 The default implementation returns false, indicating that the channel
 is not trusted.
 Subclasses can override this behavior with custom logic for deciding
 whether a channel is trusted, and the custom class can be specified
 via configuration.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver -->
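<!-- A sketch of the override mechanism described in the class doc above: a
hypothetical resolver that trusts only loopback peers. The class name and the
policy are invented for illustration; making HDFS use it requires pointing the
resolver-class configuration property (whose name is not listed in this file)
at this class.

import java.net.InetAddress;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

public class LoopbackOnlyTrustedChannelResolver extends TrustedChannelResolver {
  @Override
  public boolean isTrusted() {
    // No peer address available on the client side: trust the channel under
    // this illustrative policy.
    return true;
  }

  @Override
  public boolean isTrusted(InetAddress peerAddress) {
    // Trust only channels whose peer is a loopback address.
    return peerAddress.isLoopbackAddress();
  }
}
-->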
<!-- start class org.apache.hadoop.hdfs.protocol.datatransfer.WhitelistBasedTrustedChannelResolver -->
<class name="WhitelistBasedTrustedChannelResolver" extends="org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="WhitelistBasedTrustedChannelResolver"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setConf"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
</method>
<method name="isTrusted" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isTrusted" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="clientAddress" type="java.net.InetAddress"/>
</method>
<field name="DFS_DATATRANSFER_SERVER_FIXEDWHITELIST_FILE" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Path to the file containing the subnets and IP addresses that form the fixed whitelist.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_ENABLE" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Enables/disables the variable whitelist.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_FILE" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Path to the file containing the subnets and IP addresses that form the variable whitelist.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_CACHE_SECS" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Interval in seconds at which the variable whitelist file is checked for updates.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_CLIENT_FIXEDWHITELIST_FILE" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Path to the file containing the subnets and IP addresses that form the fixed whitelist.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_ENABLE" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Enables/disables the variable whitelist.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_FILE" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Path to the file containing the subnets and IP addresses that form the variable whitelist.]]>
</doc>
</field>
<field name="DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_CACHE_SECS" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Interval in seconds at which the variable whitelist file is checked for updates.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.datatransfer.WhitelistBasedTrustedChannelResolver -->
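<!-- A minimal configuration sketch for WhitelistBasedTrustedChannelResolver.
The whitelist file paths are invented, and treating the *_ENABLE property as a
boolean is an assumption based on its description above; the public constants
hold the actual property names, which are not spelled out in this file.

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.WhitelistBasedTrustedChannelResolver;

public class WhitelistResolverSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(
        WhitelistBasedTrustedChannelResolver.DFS_DATATRANSFER_SERVER_FIXEDWHITELIST_FILE,
        "/etc/hadoop/fixedwhitelist");
    conf.setBoolean(
        WhitelistBasedTrustedChannelResolver.DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_ENABLE,
        true);
    conf.set(
        WhitelistBasedTrustedChannelResolver.DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_FILE,
        "/etc/hadoop/whitelist");

    WhitelistBasedTrustedChannelResolver resolver = new WhitelistBasedTrustedChannelResolver();
    resolver.setConf(conf);  // the whitelist files named above are read here
    InetAddress peer = InetAddress.getByName("192.168.1.25");
    System.out.println("peer trusted: " + resolver.isTrusted(peer));
  }
}
-->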
</package>
<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
</package>
<package name="org.apache.hadoop.hdfs.protocolPB">
<!-- start class org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB -->
<class name="DatanodeProtocolServerSideTranslatorPB" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB"/>
<constructor name="DatanodeProtocolServerSideTranslatorPB" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="registerDatanode" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="sendHeartbeat" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="blockReport" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="cacheReport" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="blockReceivedAndDeleted" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="errorReport" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="versionRequest" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="reportBadBlocks" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="commitBlockSynchronization" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB -->
<!-- start class org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB -->
<class name="NamenodeProtocolServerSideTranslatorPB" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB"/>
<constructor name="NamenodeProtocolServerSideTranslatorPB" type="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="getBlockKeys" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="getTransactionId" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="getMostRecentCheckpointTxId" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="rollEditLog" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="errorReport" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="registerSubordinateNamenode" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="startCheckpoint" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="endCheckpoint" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="getEditLogManifest" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="unused" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="versionRequest" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<method name="isUpgradeFinalized" return="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="controller" type="com.google.protobuf.RpcController"/>
<param name="request" type="org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto"/>
<exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
</method>
<doc>
<![CDATA[Implementation for protobuf service that forwards requests
received on {@link NamenodeProtocolPB} to the
{@link NamenodeProtocol} server implementation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB -->
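<!-- A sketch of how a server-side translator like the one above is typically
wired in, assuming an existing NamenodeProtocol implementation is available to
wrap; NamenodeProtocolService is the protobuf-generated service class for this
protocol.

import com.google.protobuf.BlockingService;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public final class NamenodePbServiceFactory {
  private NamenodePbServiceFactory() {}

  // Wrap the given implementation: the translator decodes each request proto,
  // forwards the call to impl, and encodes the corresponding response proto.
  public static BlockingService asBlockingService(NamenodeProtocol impl) {
    NamenodeProtocolServerSideTranslatorPB translator =
        new NamenodeProtocolServerSideTranslatorPB(impl);
    return NamenodeProtocolService.newReflectiveBlockingService(translator);
  }
}
-->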
<!-- start class org.apache.hadoop.hdfs.protocolPB.PBHelper -->
<class name="PBHelper" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getByteString" return="com.google.protobuf.ByteString"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytes" type="byte[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="role" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="role" type="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole"/>
</method>
<method name="convertStoragePolicies" return="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="policyProtos" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="policy" type="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="types" type="org.apache.hadoop.fs.StorageType[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.common.StorageInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto"/>
<param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="reg" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="reg" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeID"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dn" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="did" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="did" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.Block"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blk" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blks" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="key" type="org.apache.hadoop.hdfs.security.token.block.BlockKey"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.security.token.block.BlockKey"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="k" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="keys" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="keys" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="log" type="org.apache.hadoop.hdfs.server.protocol.RemoteEditLog"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.RemoteEditLog"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="l" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="manifest" type="org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="manifest" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.CheckpointCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"/>
</method>
<method name="convertBlockKeys" return="org.apache.hadoop.hdfs.security.token.block.BlockKey[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="list" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="eb" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="inAs" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="di" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto"/>
</method>
<method name="convertDatanodeInfo" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="di" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="di" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto[]"/>
</method>
<method name="convert" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dnInfos" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
</method>
<method name="convert" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dnInfos" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
<param name="startIdx" type="int"/>
<doc>
<![CDATA[Copy from {@code dnInfos} into a target list of the same size,
 starting at {@code startIdx}.]]>
</doc>
</method>
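<!-- The doc above describes one of the many to/from converter pairs in
PBHelper. A small round-trip sketch with the simplest such pair (Block and
BlockProto) follows; the block id, length and generation stamp are arbitrary
illustrative values.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class BlockProtoRoundTrip {
  public static void main(String[] args) {
    Block original = new Block(1073741825L, 134217728L, 1001L);
    BlockProto proto = PBHelper.convert(original);  // POJO to protobuf message
    Block restored = PBHelper.convert(proto);       // protobuf message back to POJO
    // Block equality is defined by block id, so the round trip preserves it.
    System.out.println(original.equals(restored));
  }
}
-->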
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="list" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
</method>
<method name="convertDatanodeStorageReport" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="report" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"/>
</method>
<method name="convertDatanodeStorageReports" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="reports" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[]"/>
</method>
<method name="convertDatanodeStorageReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto"/>
</method>
<method name="convertDatanodeStorageReports" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="protos" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="adminState" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto"/>
</method>
<method name="convert" return="org.apache.hadoop.security.proto.SecurityProtos.TokenProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="tok" type="org.apache.hadoop.security.token.Token"/>
</method>
<method name="convert" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockToken" type="org.apache.hadoop.security.proto.SecurityProtos.TokenProto"/>
</method>
<method name="convertDelegationToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockToken" type="org.apache.hadoop.security.proto.SecurityProtos.TokenProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="state" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="state" type="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="bbCmd" type="org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.FinalizeCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.BlockCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cmd" type="org.apache.hadoop.hdfs.server.protocol.BlockIdCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="datanodeCommand" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="keyUpdateCmd" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.FinalizeCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="finalizeCmd" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="recoveryCmd" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlockCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blkCmd" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlockIdCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blkIdCmd" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="datanodeInfosProto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="balancerCmd" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="receivedDeletedBlockInfo" type="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"/>
</method>
<method name="convertLocatedBlock" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="lb" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
</method>
<method name="convertLocatedBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="lb" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto[]"/>
</method>
<method name="convertLocatedBlock" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="lb" type="java.util.List"/>
</method>
<method name="convertLocatedBlock2" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="lb" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="lb" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="lb" type="org.apache.hadoop.hdfs.protocol.LocatedBlocks"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="bet" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="bet" type="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.FsServerDefaults"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.fs.FsServerDefaults"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto"/>
</method>
<method name="convertCreateFlag" return="int"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
</method>
<method name="convertCreateFlag" return="org.apache.hadoop.io.EnumSetWritable"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="flag" type="int"/>
</method>
<method name="convertCacheFlags" return="int"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="flags" type="java.util.EnumSet"/>
</method>
<method name="convertCacheFlags" return="java.util.EnumSet"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="flags" type="int"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sdirStatusProto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="status" type="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dl" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="d" type="org.apache.hadoop.hdfs.protocol.DirectoryListing"/>
</method>
<method name="convert" return="long[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="res" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fsStats" type="long[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="t" type="org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="t" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto"/>
</method>
<method name="convertRollingUpgradeStatus" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="status" type="org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CorruptFileBlocks"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="org.apache.hadoop.hdfs.protocol.CorruptFileBlocks"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.ContentSummary"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cs" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="cs" type="org.apache.hadoop.fs.ContentSummary"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="hb" type="org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage"/>
</method>
<method name="convertStorageTypes" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="types" type="org.apache.hadoop.fs.StorageType[]"/>
</method>
<method name="convertStorageTypes" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="types" type="org.apache.hadoop.fs.StorageType[]"/>
<param name="startIdx" type="int"/>
</method>
<method name="convertStorageType" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto"/>
</method>
<method name="convertStorageType" return="org.apache.hadoop.fs.StorageType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto"/>
</method>
<method name="convertStorageTypes" return="org.apache.hadoop.fs.StorageType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storageTypesList" type="java.util.List"/>
<param name="expectedSize" type="int"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="r" type="org.apache.hadoop.hdfs.server.protocol.StorageReport"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.StorageReport"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto"/>
</method>
<method name="convertStorageReports" return="org.apache.hadoop.hdfs.server.protocol.StorageReport[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="list" type="java.util.List"/>
</method>
<method name="convertStorageReports" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storages" type="org.apache.hadoop.hdfs.server.protocol.StorageReport[]"/>
</method>
<method name="convertVolumeFailureSummary" return="org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto"/>
</method>
<method name="convertVolumeFailureSummary" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="volumeFailureSummary" type="org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.JournalInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="j" type="org.apache.hadoop.hdfs.server.protocol.JournalInfo"/>
<doc>
<![CDATA[Method used for converting {@link JournalInfoProto} sent from Namenode
to Journal receivers to {@link NamenodeRegistration}.]]>
</doc>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sdlp" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="status" type="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[]"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="entry" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="entry" type="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="reportProto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="report" type="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport"/>
</method>
<method name="convert" return="org.apache.hadoop.util.DataChecksum.Type"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="expiration" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="stats" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="entry" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CachePoolInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="stats" type="org.apache.hadoop.hdfs.protocol.CachePoolStats"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CachePoolStats"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="entry" type="org.apache.hadoop.hdfs.protocol.CachePoolEntry"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.CachePoolEntry"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.util.DataChecksum.Type"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto"/>
</method>
<method name="vintPrefixed" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="input" type="java.io.InputStream"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="v" type="org.apache.hadoop.fs.permission.FsAction"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.permission.FsAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="v" type="org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto"/>
</method>
<method name="convertAclEntryProto" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="aclSpec" type="java.util.List"/>
</method>
<method name="convertAclEntry" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="aclSpec" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.permission.AclStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="e" type="org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="e" type="org.apache.hadoop.fs.permission.AclStatus"/>
</method>
<method name="convertXAttrProto" return="org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.fs.XAttr"/>
</method>
<method name="convertXAttrProto" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrSpec" type="java.util.List"/>
</method>
<method name="convert" return="int"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="flag" type="java.util.EnumSet"/>
<doc>
      <![CDATA[The flag field in PB is a bitmask whose values are the same as the
      enum values of XAttrSetFlag]]>
</doc>
</method>
<method name="convert" return="java.util.EnumSet"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="flag" type="int"/>
</method>
<method name="convertXAttr" return="org.apache.hadoop.fs.XAttr"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto"/>
</method>
<method name="convertXAttrs" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrSpec" type="java.util.List"/>
</method>
<method name="convert" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto"/>
</method>
<method name="convertXAttrsResponse" return="org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrs" type="java.util.List"/>
</method>
<method name="convert" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto"/>
</method>
<method name="convertListXAttrsResponse" return="org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="names" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="zone" type="org.apache.hadoop.hdfs.protocol.EncryptionZone"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.EncryptionZone"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="slotId" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="shmId" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="slotId" type="org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="shmId" type="org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.inotify.EventBatchList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="resp" type="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="convertEditsResponse" return="org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="el" type="org.apache.hadoop.hdfs.inotify.EventBatchList"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="option" type="org.apache.hadoop.crypto.CipherOption"/>
</method>
<method name="convert" return="org.apache.hadoop.crypto.CipherOption"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto"/>
</method>
<method name="convertCipherOptions" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="options" type="java.util.List"/>
</method>
<method name="convertCipherOptionProtos" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="protos" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="suite" type="org.apache.hadoop.crypto.CipherSuite"/>
</method>
<method name="convert" return="org.apache.hadoop.crypto.CipherSuite"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto"/>
</method>
<method name="convert" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="versions" type="org.apache.hadoop.crypto.CryptoProtocolVersion[]"/>
</method>
<method name="convertCryptoProtocolVersions" return="org.apache.hadoop.crypto.CryptoProtocolVersion[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="protos" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.crypto.CryptoProtocolVersion"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="version" type="org.apache.hadoop.crypto.CryptoProtocolVersion"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.fs.FileEncryptionInfo"/>
</method>
<method name="convertPerFileEncInfo" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.fs.FileEncryptionInfo"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="suite" type="org.apache.hadoop.crypto.CipherSuite"/>
<param name="version" type="org.apache.hadoop.crypto.CryptoProtocolVersion"/>
<param name="keyName" type="java.lang.String"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.FileEncryptionInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto"/>
</method>
<method name="convert" return="org.apache.hadoop.fs.FileEncryptionInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fileProto" type="org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto"/>
<param name="suite" type="org.apache.hadoop.crypto.CipherSuite"/>
<param name="version" type="org.apache.hadoop.crypto.CryptoProtocolVersion"/>
<param name="keyName" type="java.lang.String"/>
</method>
<method name="convert" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="targetPinnings" type="boolean[]"/>
<param name="idx" type="int"/>
</method>
<method name="convertBooleanList" return="boolean[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="targetPinningsList" type="java.util.List"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.server.protocol.BlockReportContext"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto"/>
</method>
<method name="convert" return="org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="context" type="org.apache.hadoop.hdfs.server.protocol.BlockReportContext"/>
</method>
<doc>
      <![CDATA[Utilities for converting protobuf classes to and from implementation
      classes, along with other helpers for dealing with protobuf.
      Note that when converting from an internal type to a protobuf type, the
      converter never returns null for the protobuf type. The check for the internal
      type being null must be done before calling the convert() method.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocolPB.PBHelper -->
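  <!-- Illustrative usage sketch for the converter pattern described in the
       PBHelper class doc above: the caller checks the internal type for null
       before calling convert(), since converters never return null for the
       protobuf type. The wrapper class name and method are assumptions for
       illustration; the converter signature is taken from the listing above.

       import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
       import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
       import org.apache.hadoop.hdfs.protocolPB.PBHelper;

       public class PBHelperUsageSketch {
         static RollingUpgradeStatusProto toProto(RollingUpgradeStatus status) {
           // The null check for the internal type is done at the call site.
           if (status == null) {
             return null; // nothing to convert; handle the absent value here
           }
           return PBHelper.convertRollingUpgradeStatus(status);
         }
       }
  -->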
</package>
<package name="org.apache.hadoop.hdfs.qjournal.client">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.protocol">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
</package>
<package name="org.apache.hadoop.hdfs.qjournal.server">
<!-- start class org.apache.hadoop.hdfs.qjournal.server.Journal -->
<class name="Journal" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Unlock and release resources.]]>
</doc>
</method>
<method name="getLastWriterEpoch" return="long"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="heartbeat"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="reqInfo" type="org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="isFormatted" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="startLogSegment"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="reqInfo" type="org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo"/>
<param name="txid" type="long"/>
<param name="layoutVersion" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Start a new segment at the given txid. The previous segment
must have already been finalized.]]>
</doc>
</method>
<method name="finalizeLogSegment"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="reqInfo" type="org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo"/>
<param name="startTxId" type="long"/>
<param name="endTxId" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Finalize the log segment at the given transaction ID.]]>
</doc>
</method>
<method name="purgeLogsOlderThan"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="reqInfo" type="org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo"/>
<param name="minTxIdToKeep" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@see JournalManager#purgeLogsOlderThan(long)]]>
</doc>
</method>
<method name="getEditLogManifest" return="org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="sinceTxId" type="long"/>
<param name="inProgressOk" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@see QJournalProtocol#getEditLogManifest(String, long, boolean)]]>
</doc>
</method>
<method name="prepareRecovery" return="org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="reqInfo" type="org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo"/>
<param name="segmentTxId" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@see QJournalProtocol#prepareRecovery(RequestInfo, long)]]>
</doc>
</method>
<method name="acceptRecovery"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="reqInfo" type="org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo"/>
<param name="segment" type="org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto"/>
<param name="fromUrl" type="java.net.URL"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@see QJournalProtocol#acceptRecovery(RequestInfo, QJournalProtocolProtos.SegmentStateProto, URL)]]>
</doc>
</method>
<method name="doPreUpgrade"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doUpgrade"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="sInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doFinalize"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="canRollBack" return="java.lang.Boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
<param name="prevStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
<param name="targetLayoutVersion" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doRollback"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getJournalCTime" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="LAST_PROMISED_FILENAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="LAST_WRITER_EPOCH" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A JournalNode can manage journals for several clusters at once.
Each such journal is entirely independent despite being hosted by
the same JVM.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.qjournal.server.Journal -->
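  <!-- Illustrative sketch of the edit-log segment lifecycle implied by the
       Journal method docs above: a segment is started at a txid only after the
       previous segment has been finalized, and is finalized once its last
       transaction has been written. The helper class and the source of the
       RequestInfo and txid values are assumptions for illustration.

       import java.io.IOException;
       import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
       import org.apache.hadoop.hdfs.qjournal.server.Journal;

       class JournalSegmentSketch {
         static void writeOneSegment(Journal journal, RequestInfo reqInfo,
             long startTxId, long endTxId, int layoutVersion) throws IOException {
           // Start a new segment; the previous segment must already be finalized.
           journal.startLogSegment(reqInfo, startTxId, layoutVersion);
           // ... edits for txids startTxId through endTxId are written here ...
           // Finalize the segment for the given transaction ID range.
           journal.finalizeLogSegment(reqInfo, startTxId, endTxId);
         }
       }
  -->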
<!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
<interface name="JournalNodeMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getJournalsStatus" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Get status information (e.g., whether formatted) of the JournalNode's journals.
 @return A string representing the status of each journal]]>
</doc>
</method>
<doc>
<![CDATA[This is the JMX management interface for JournalNode information]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
</package>
<package name="org.apache.hadoop.hdfs.security.token.block">
<!-- start class org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager -->
<class name="BlockPoolTokenSecretManager" extends="org.apache.hadoop.security.token.SecretManager"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockPoolTokenSecretManager"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="addBlockPool"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<param name="secretMgr" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager"/>
<doc>
        <![CDATA[Add a block pool Id and the corresponding {@link BlockTokenSecretManager} to the map
@param bpid block pool Id
@param secretMgr {@link BlockTokenSecretManager}]]>
</doc>
</method>
<method name="isBlockPoolRegistered" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
</method>
<method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Return an empty BlockTokenIdentifier]]>
</doc>
</method>
<method name="createPassword" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="identifier" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
</method>
<method name="retrievePassword" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="identifier" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
<exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
</method>
<method name="checkAccess"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
<param name="userId" type="java.lang.String"/>
<param name="block" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
<param name="mode" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"/>
<exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
<doc>
<![CDATA[See {@link BlockTokenSecretManager#checkAccess(BlockTokenIdentifier,
String, ExtendedBlock, AccessMode)}]]>
</doc>
</method>
<method name="checkAccess"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<param name="userId" type="java.lang.String"/>
<param name="block" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
<param name="mode" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"/>
<exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
<doc>
<![CDATA[See {@link BlockTokenSecretManager#checkAccess(Token, String,
ExtendedBlock, AccessMode)}]]>
</doc>
</method>
<method name="addKeys"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<param name="exportedKeys" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[See {@link BlockTokenSecretManager#addKeys(ExportedBlockKeys)}]]>
</doc>
</method>
<method name="generateToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
<param name="of" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[See {@link BlockTokenSecretManager#generateToken(ExtendedBlock, EnumSet)}]]>
</doc>
</method>
<method name="clearAllKeysForTesting"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="generateDataEncryptionKey" return="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockPoolId" type="java.lang.String"/>
</method>
<method name="retrieveDataEncryptionKey" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="keyId" type="int"/>
<param name="blockPoolId" type="java.lang.String"/>
<param name="nonce" type="byte[]"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
      <![CDATA[Manages a {@link BlockTokenSecretManager} per block pool. Routes requests,
      given a block pool Id, to the corresponding {@link BlockTokenSecretManager}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager -->
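  <!-- Illustrative sketch of the per-block-pool routing described in the
       BlockPoolTokenSecretManager class doc above. The helper class, the READ
       access mode choice, and where the per-pool BlockTokenSecretManager comes
       from are assumptions for illustration; the method signatures are taken
       from the listing above.

       import java.io.IOException;
       import java.util.EnumSet;
       import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
       import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
       import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
       import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
       import org.apache.hadoop.security.token.Token;

       class BlockPoolTokenSketch {
         static Token<BlockTokenIdentifier> tokenFor(BlockPoolTokenSecretManager mgr,
             BlockTokenSecretManager poolMgr, ExtendedBlock block) throws IOException {
           // Register the per-pool secret manager keyed by the block pool Id;
           // later requests for blocks in that pool are routed to it.
           mgr.addBlockPool(block.getBlockPoolId(), poolMgr);
           return mgr.generateToken(block,
               EnumSet.of(BlockTokenSecretManager.AccessMode.READ));
         }
       }
  -->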
<!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode -->
<class name="BlockTokenSecretManager.AccessMode" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode -->
</package>
<package name="org.apache.hadoop.hdfs.security.token.delegation">
<!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.SWebHdfsDelegationTokenIdentifier -->
<class name="DelegationTokenIdentifier.SWebHdfsDelegationTokenIdentifier" extends="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.WebHdfsDelegationTokenIdentifier"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SWebHdfsDelegationTokenIdentifier"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getKind" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.SWebHdfsDelegationTokenIdentifier -->
<!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.WebHdfsDelegationTokenIdentifier -->
<class name="DelegationTokenIdentifier.WebHdfsDelegationTokenIdentifier" extends="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="WebHdfsDelegationTokenIdentifier"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getKind" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.WebHdfsDelegationTokenIdentifier -->
<!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState -->
<class name="DelegationTokenSecretManager.SecretManagerState" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SecretManagerState" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection, java.util.List, java.util.List"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<field name="section" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="keys" type="java.util.List"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="tokens" type="java.util.List"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState -->
</package>
<package name="org.apache.hadoop.hdfs.server.balancer">
<!-- start class org.apache.hadoop.hdfs.server.balancer.Dispatcher.DBlock -->
<class name="Dispatcher.DBlock" extends="org.apache.hadoop.hdfs.server.balancer.MovedBlocks.Locations"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DBlock" type="org.apache.hadoop.hdfs.protocol.Block"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[A class for keeping track of block locations in the dispatcher.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.Dispatcher.DBlock -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode -->
<class name="Dispatcher.DDatanode" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDatanodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addTarget" return="org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storageType" type="org.apache.hadoop.fs.StorageType"/>
<param name="maxSize2Move" type="long"/>
</method>
<method name="addSource" return="org.apache.hadoop.hdfs.server.balancer.Dispatcher.Source"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storageType" type="org.apache.hadoop.fs.StorageType"/>
<param name="maxSize2Move" type="long"/>
<param name="d" type="org.apache.hadoop.hdfs.server.balancer.Dispatcher"/>
</method>
<field name="delayUntil" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A class that keeps track of a datanode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup -->
<class name="Dispatcher.DDatanode.StorageGroup" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getStorageType" return="org.apache.hadoop.fs.StorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDatanodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incScheduledSize"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="size" type="long"/>
<doc>
<![CDATA[increment scheduled size]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
<![CDATA[A group of storages in a datanode with the same storage type.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.Dispatcher.PendingMove -->
<class name="Dispatcher.PendingMove" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class keeps track of a scheduled block move]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.Dispatcher.PendingMove -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.Dispatcher.Source -->
<class name="Dispatcher.Source" extends="org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="addPendingMove" return="org.apache.hadoop.hdfs.server.balancer.Dispatcher.PendingMove"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.server.balancer.Dispatcher.DBlock"/>
<param name="target" type="org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup"/>
<doc>
<![CDATA[Add a pending move]]>
</doc>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
      <![CDATA[A node that can be the source of a block move]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.Dispatcher.Source -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.Dispatcher.StorageGroupMap -->
<class name="Dispatcher.StorageGroupMap" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="StorageGroupMap"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="get" return="G"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="datanodeUuid" type="java.lang.String"/>
<param name="storageType" type="org.apache.hadoop.fs.StorageType"/>
</method>
<method name="put"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="g" type="G"/>
</method>
<method name="values" return="java.util.Collection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.Dispatcher.StorageGroupMap -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.ExitStatus -->
<class name="ExitStatus" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.balancer.ExitStatus[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.balancer.ExitStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getExitCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the command line exit code.]]>
</doc>
</method>
<doc>
      <![CDATA[Exit status. The value associated with each exit status is directly mapped
      to the process's exit code on the command line.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.ExitStatus -->
<!-- start interface org.apache.hadoop.hdfs.server.balancer.Matcher -->
<interface name="Matcher" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="match" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="cluster" type="org.apache.hadoop.net.NetworkTopology"/>
<param name="left" type="org.apache.hadoop.net.Node"/>
<param name="right" type="org.apache.hadoop.net.Node"/>
<doc>
<![CDATA[Given the cluster topology, does the left node match the right node?]]>
</doc>
</method>
<field name="SAME_NODE_GROUP" type="org.apache.hadoop.hdfs.server.balancer.Matcher"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Match datanodes in the same node group.]]>
</doc>
</field>
<field name="SAME_RACK" type="org.apache.hadoop.hdfs.server.balancer.Matcher"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Match datanodes in the same rack.]]>
</doc>
</field>
<field name="ANY_OTHER" type="org.apache.hadoop.hdfs.server.balancer.Matcher"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Match any datanode with any other datanode.]]>
</doc>
</field>
<doc>
<![CDATA[A matcher interface for matching nodes.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.balancer.Matcher -->
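<!-- Illustrative sketch of the Matcher interface above. "cluster", "left" and "right" are
     placeholders for a populated org.apache.hadoop.net.NetworkTopology and two resolved
     org.apache.hadoop.net.Node instances; they are not part of this listing.

     boolean sameGroup = Matcher.SAME_NODE_GROUP.match(cluster, left, right);
     boolean sameRack  = Matcher.SAME_RACK.match(cluster, left, right);
     boolean anyOther  = Matcher.ANY_OTHER.match(cluster, left, right);
-->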
<!-- start class org.apache.hadoop.hdfs.server.balancer.MovedBlocks -->
<class name="MovedBlocks" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="MovedBlocks" type="long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[initialize the moved blocks collection]]>
</doc>
</constructor>
<method name="put"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.server.balancer.MovedBlocks.Locations"/>
<doc>
<![CDATA[add a block, thus marking it as having been moved]]>
</doc>
</method>
<method name="contains" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
<doc>
<![CDATA[@return whether the block is marked as moved]]>
</doc>
</method>
<method name="cleanup"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[remove old blocks]]>
</doc>
</method>
<doc>
<![CDATA[This window keeps track of blocks that have been moved within a fixed
time interval (default is 1.5 hours). The old window holds the older blocks and
the current window holds the more recent ones. The cleanup method checks whether
the blocks in the old window are older than the fixed time interval; if so, it
purges the old window and then moves the blocks in the current window into the
old window.
@param <L> Location type]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.MovedBlocks -->
<!-- start class org.apache.hadoop.hdfs.server.balancer.MovedBlocks.Locations -->
<class name="MovedBlocks.Locations" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Locations" type="org.apache.hadoop.hdfs.protocol.Block"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="clearLocations"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[clear the block locations]]>
</doc>
</method>
<method name="addLocation"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="loc" type="L"/>
<doc>
<![CDATA[add a location]]>
</doc>
</method>
<method name="isLocatedOn" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="loc" type="L"/>
<doc>
<![CDATA[@return whether the block is located at the given location.]]>
</doc>
</method>
<method name="getLocations" return="java.util.List"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return its locations]]>
</doc>
</method>
<method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getNumBytes" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="locations" type="java.util.List"
transient="false" volatile="false"
static="false" final="true" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[The locations of the replicas of the block.]]>
</doc>
</field>
<doc>
<![CDATA[A class for keeping track of a block and its locations]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.balancer.MovedBlocks.Locations -->
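<!-- Illustrative sketch tying together the MovedBlocks window and MovedBlocks.Locations
     classes above. The generic parameter L is instantiated with DatanodeInfo purely for
     illustration; "block" (an org.apache.hadoop.hdfs.protocol.Block), "datanode" and the
     1.5-hour window width are placeholder values.

     MovedBlocks<DatanodeInfo> moved = new MovedBlocks<DatanodeInfo>(90L * 60 * 1000);
     MovedBlocks.Locations<DatanodeInfo> locs = new MovedBlocks.Locations<DatanodeInfo>(block);
     locs.addLocation(datanode);
     moved.put(locs);                       // mark the block as moved
     boolean recentlyMoved = moved.contains(block);
     moved.cleanup();                       // purge entries older than the window
-->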
</package>
<package name="org.apache.hadoop.hdfs.server.blockmanagement">
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager -->
<class name="BlockIdManager" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockIdManager" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockManager"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="upgradeGenerationStampToV2" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Upgrades the generation stamp for the filesystem
by reserving a sufficient range for all existing blocks.
Should be invoked only during the first upgrade to
sequential block IDs.]]>
</doc>
</method>
<method name="setGenerationStampV1Limit"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="stamp" type="long"/>
<doc>
<![CDATA[Sets the generation stamp that delineates random and sequentially
allocated block IDs.
@param stamp set generation stamp limit to this value]]>
</doc>
</method>
<method name="getGenerationStampAtblockIdSwitch" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the value of the generation stamp that delineates sequential
and random block IDs.]]>
</doc>
</method>
<method name="setLastAllocatedBlockId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="long"/>
<doc>
<![CDATA[Sets the maximum allocated block ID for this filesystem. This is
the basis for allocating new block IDs.]]>
</doc>
</method>
<method name="getLastAllocatedBlockId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the maximum sequentially allocated block ID for this filesystem]]>
</doc>
</method>
<method name="setGenerationStampV1"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="stamp" type="long"/>
<doc>
<![CDATA[Sets the current generation stamp for legacy blocks]]>
</doc>
</method>
<method name="getGenerationStampV1" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the current generation stamp for legacy blocks]]>
</doc>
</method>
<method name="setGenerationStampV2"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="stamp" type="long"/>
<doc>
<![CDATA[Sets the current generation stamp for this filesystem]]>
</doc>
</method>
<method name="getGenerationStampV2" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="nextGenerationStamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="legacyBlock" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Increments, logs and then returns the stamp]]>
</doc>
</method>
<method name="getGenerationStampV1Limit" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isLegacyBlock" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
<doc>
<![CDATA[Determine whether the block ID was randomly generated (legacy) or
sequentially generated. The generation stamp value is used to
make the distinction.
@return true if the block ID was randomly generated, false otherwise.]]>
</doc>
</method>
<method name="nextBlockId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Increments, logs and then returns the block ID]]>
</doc>
</method>
<method name="isGenStampInFuture" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[BlockIdManager allocates the generation stamps and the block IDs. The
{@link FSNamesystem} is responsible for persisting the allocations in the
{@link EditLog}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager -->
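<!-- Illustrative sketch of how the BlockIdManager methods above fit together when a new
     block is allocated. allocateBlock is a hypothetical helper, and the BlockIdManager
     instance is assumed to come from the name-node internals (it is constructed with a
     BlockManager).

     static Block allocateBlock(BlockIdManager idManager) throws IOException {
       long id = idManager.nextBlockId();                    // increments, logs and returns the block ID
       long genStamp = idManager.nextGenerationStamp(false); // false: not a legacy (randomly generated) block
       return new Block(id, 0L, genStamp);
     }
-->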
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction -->
<class name="BlockInfoContiguousUnderConstruction" extends="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockInfoContiguousUnderConstruction" type="org.apache.hadoop.hdfs.protocol.Block, short"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create block and set its state to
{@link BlockUCState#UNDER_CONSTRUCTION}.]]>
</doc>
</constructor>
<constructor name="BlockInfoContiguousUnderConstruction" type="org.apache.hadoop.hdfs.protocol.Block, short, org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState, org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create a block that is currently being constructed.]]>
</doc>
</constructor>
<method name="setExpectedLocations"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="targets" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"/>
<doc>
<![CDATA[Set expected locations]]>
</doc>
</method>
<method name="getExpectedStorageLocations" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create an array of expected replica locations
(as assigned by chooseTargets()).]]>
</doc>
</method>
<method name="getNumExpectedLocations" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the number of expected locations]]>
</doc>
</method>
<method name="getBlockUCState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the state of the block under construction.
@see BlockUCState]]>
</doc>
</method>
<method name="getBlockRecoveryId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get block recovery ID]]>
</doc>
</method>
<method name="getTruncateBlock" return="org.apache.hadoop.hdfs.protocol.Block"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the recovery block]]>
</doc>
</method>
<method name="setTruncateBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="recoveryBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
</method>
<method name="setGenerationStampAndVerifyReplicas"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="genStamp" type="long"/>
<doc>
<![CDATA[Process the recorded replicas. When about to commit or finish the
pipeline recovery, sort out the bad replicas.
@param genStamp The final generation stamp for the block.]]>
</doc>
</method>
<method name="initializeBlockRecovery"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="recoveryId" type="long"/>
<doc>
<![CDATA[Initialize lease recovery for this block.
Find the first alive data-node starting from the previous primary and
make it primary.]]>
</doc>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="appendStringTo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="sb" type="java.lang.StringBuilder"/>
</method>
<doc>
<![CDATA[Represents a block that is currently being constructed.<br>
This is usually the last block of a file opened for write or append.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup -->
<class name="BlockPlacementPolicyWithNodeGroup" extends="org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockPlacementPolicyWithNodeGroup" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.blockmanagement.FSClusterStats, org.apache.hadoop.net.NetworkTopology, org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<constructor name="BlockPlacementPolicyWithNodeGroup"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<method name="initialize"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="stats" type="org.apache.hadoop.hdfs.server.blockmanagement.FSClusterStats"/>
<param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
<param name="host2datanodeMap" type="org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap"/>
</method>
<method name="chooseLocalStorage" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="localMachine" type="org.apache.hadoop.net.Node"/>
<param name="excludedNodes" type="java.util.Set"/>
<param name="blocksize" type="long"/>
<param name="maxNodesPerRack" type="int"/>
<param name="results" type="java.util.List"/>
<param name="avoidStaleNodes" type="boolean"/>
<param name="storageTypes" type="java.util.EnumMap"/>
<param name="fallbackToLocalRack" type="boolean"/>
<exception name="BlockPlacementPolicy.NotEnoughReplicasException" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy.NotEnoughReplicasException"/>
<doc>
<![CDATA[Choose the local node of localMachine as the target.
If localMachine is not available, choose a node in the same node group or
on the same rack instead.
@return the chosen node]]>
</doc>
</method>
<method name="chooseLocalRack" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="localMachine" type="org.apache.hadoop.net.Node"/>
<param name="excludedNodes" type="java.util.Set"/>
<param name="blocksize" type="long"/>
<param name="maxNodesPerRack" type="int"/>
<param name="results" type="java.util.List"/>
<param name="avoidStaleNodes" type="boolean"/>
<param name="storageTypes" type="java.util.EnumMap"/>
<exception name="BlockPlacementPolicy.NotEnoughReplicasException" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy.NotEnoughReplicasException"/>
</method>
<method name="chooseRemoteRack"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="numOfReplicas" type="int"/>
<param name="localMachine" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor"/>
<param name="excludedNodes" type="java.util.Set"/>
<param name="blocksize" type="long"/>
<param name="maxReplicasPerRack" type="int"/>
<param name="results" type="java.util.List"/>
<param name="avoidStaleNodes" type="boolean"/>
<param name="storageTypes" type="java.util.EnumMap"/>
<exception name="BlockPlacementPolicy.NotEnoughReplicasException" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy.NotEnoughReplicasException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="getRack" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="cur" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
</method>
<method name="addToExcludedNodes" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="chosenNode" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor"/>
<param name="excludedNodes" type="java.util.Set"/>
<doc>
<![CDATA[Find other nodes in the same node group as <i>localMachine</i> and add them
to <i>excludedNodes</i>, since replicas should not be duplicated on nodes
within the same node group.
@return the number of newly excluded nodes]]>
</doc>
</method>
<method name="pickupReplicaSet" return="java.util.Collection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="first" type="java.util.Collection"/>
<param name="second" type="java.util.Collection"/>
<doc>
<![CDATA[Pick the replica node set from which a replica is deleted when the block
is over-replicated.
The first set contains replica nodes on racks with more than one replica,
while the second set contains the remaining replica nodes.
If the first set is not empty, divide it into two subsets: moreThanOne
contains nodes whose node group holds more than one replica, and exactlyOne
contains the remaining nodes of the first set; then pick from the priority
subset (moreThanOne) if it is not empty.
If the first set is empty, pick from the second set.]]>
</doc>
</method>
<doc>
<![CDATA[The class is responsible for choosing the desired number of targets
for placing block replicas in an environment with a node-group layer.
The replica placement strategy is adjusted to:
If the writer is on a datanode, the 1st replica is placed on the local
node (or local node group); otherwise, on a random datanode.
The 2nd replica is placed on a datanode that is on a different rack from the
1st replica node.
The 3rd replica is placed on a datanode which is in a different node group
but on the same rack as the second replica node.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup -->
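<!-- A hedged configuration sketch for enabling the node-group-aware placement policy
     described above. The property names "dfs.block.replicator.classname" and
     "net.topology.impl", and the NetworkTopologyWithNodeGroup topology class, are
     assumptions drawn from typical Hadoop 2.x deployments rather than from this listing.

     Configuration conf = new Configuration();
     conf.setClass("dfs.block.replicator.classname",
         BlockPlacementPolicyWithNodeGroup.class, BlockPlacementPolicy.class);
     conf.setClass("net.topology.impl",
         NetworkTopologyWithNodeGroup.class, NetworkTopology.class);
-->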
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatusDefault -->
<class name="BlockPlacementStatusDefault" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus"/>
<constructor name="BlockPlacementStatusDefault" type="int, int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isPlacementPolicySatisfied" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getErrorDescription" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatusDefault -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite -->
<class name="BlockStoragePolicySuite" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockStoragePolicySuite" type="byte, org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="createDefaultSuite" return="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPolicy" return="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="byte"/>
<doc>
<![CDATA[@return the corresponding policy.]]>
</doc>
</method>
<method name="getDefaultPolicy" return="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the default policy.]]>
</doc>
</method>
<method name="getPolicy" return="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="policyName" type="java.lang.String"/>
</method>
<method name="getAllPolicies" return="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="buildXAttrName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="buildXAttr" return="org.apache.hadoop.fs.XAttr"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="policyId" type="byte"/>
</method>
<method name="isStoragePolicyXAttr" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="xattr" type="org.apache.hadoop.fs.XAttr"/>
</method>
<field name="STORAGE_POLICY_XATTR_NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="XAttrNS" type="org.apache.hadoop.fs.XAttr.NameSpace"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="ID_BIT_LENGTH" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="ID_UNSPECIFIED" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A collection of block storage policies.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite -->
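<!-- Illustrative sketch of the BlockStoragePolicySuite API above: building the default
     suite and looking up policies. The policy name "HOT" is an assumption about the
     default suite's contents, not something stated in this listing.

     BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
     BlockStoragePolicy hot = suite.getPolicy("HOT");
     BlockStoragePolicy fallback = suite.getDefaultPolicy();
     for (BlockStoragePolicy policy : suite.getAllPolicies()) {
       System.out.println(policy);
     }
-->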
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason -->
<class name="CorruptReplicasMap.Reason" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[The corruption reason code]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList -->
<class name="DatanodeDescriptor.CachedBlocksList" extends="org.apache.hadoop.util.IntrusiveCollection"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getDatanode" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getType" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A list of CachedBlock objects on this datanode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type -->
<class name="DatanodeDescriptor.CachedBlocksList.Type" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.DecommissioningStatus -->
<class name="DatanodeDescriptor.DecommissioningStatus" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DecommissioningStatus"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getUnderReplicatedBlocks" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the number of under-replicated blocks]]>
</doc>
</method>
<method name="getDecommissionOnlyReplicas" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the number of decommission-only replicas]]>
</doc>
</method>
<method name="getUnderReplicatedInOpenFiles" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the number of under-replicated blocks in open files]]>
</doc>
</method>
<method name="setStartTime"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="time" type="long"/>
<doc>
<![CDATA[Set start time]]>
</doc>
</method>
<method name="getStartTime" return="long"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return start time]]>
</doc>
</method>
<doc>
<![CDATA[Decommissioning status]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.DecommissioningStatus -->
<!-- start interface org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics -->
<interface name="DatanodeStatistics" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getCapacityTotal" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the total capacity]]>
</doc>
</method>
<method name="getCapacityUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the used capacity]]>
</doc>
</method>
<method name="getCapacityUsedPercent" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the percentage of the used capacity over the total capacity.]]>
</doc>
</method>
<method name="getCapacityRemaining" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the remaining capacity]]>
</doc>
</method>
<method name="getCapacityRemainingPercent" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the percentage of the remaining capacity over the total capacity.]]>
</doc>
</method>
<method name="getBlockPoolUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the block pool used.]]>
</doc>
</method>
<method name="getPercentBlockPoolUsed" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the percentage of the block pool used space over the total capacity.]]>
</doc>
</method>
<method name="getCacheCapacity" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the total cache capacity of all DataNodes]]>
</doc>
</method>
<method name="getCacheUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the total cache used by all DataNodes]]>
</doc>
</method>
<method name="getXceiverCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the xceiver count]]>
</doc>
</method>
<method name="getInServiceXceiverCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return average xceiver count for non-decommission(ing|ed) nodes]]>
</doc>
</method>
<method name="getNumDatanodesInService" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return number of non-decommission(ing|ed) nodes]]>
</doc>
</method>
<method name="getCapacityUsedNonDFS" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the total used space by data nodes for non-DFS purposes
such as storing temporary files on the local file system]]>
</doc>
</method>
<method name="getStats" return="long[]"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The same as {@link ClientProtocol#getStats()}.
The block related entries are set to -1.]]>
</doc>
</method>
<method name="getExpiredHeartbeats" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the expired heartbeats]]>
</doc>
</method>
<doc>
<![CDATA[Datanode statistics]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo -->
<class name="DatanodeStorageInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="toDatanodeInfos" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storages" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"/>
</method>
<method name="toStorageIDs" return="java.lang.String[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storages" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"/>
</method>
<method name="toStorageTypes" return="org.apache.hadoop.fs.StorageType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storages" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"/>
</method>
<method name="updateFromStorage"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storage" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage"/>
</method>
<method name="setUtilizationForTesting"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="capacity" type="long"/>
<param name="dfsUsed" type="long"/>
<param name="remaining" type="long"/>
<param name="blockPoolUsed" type="long"/>
</method>
<method name="getStorageType" return="org.apache.hadoop.fs.StorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addBlock" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous"/>
</method>
<method name="removeBlock" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous"/>
</method>
<method name="getDatanodeDescriptor" return="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrementBlocksScheduled"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storages" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"/>
<doc>
<![CDATA[Increment the number of blocks scheduled for each given storage]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo[]"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A Datanode has one or more storages. A storage in the Datanode is represented
by this class.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas -->
<class name="NumberReplicas" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="liveReplicas" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="decommissionedReplicas" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="corruptReplicas" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="excessReplicas" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="replicasOnStaleNodes" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the number of replicas which are on stale nodes.
This is not mutually exclusive with the other counts; i.e., a
replica may count as both "live" and "stale".]]>
</doc>
</method>
<doc>
<![CDATA[An immutable object that stores the number of live replicas and
the number of decommissioned replicas.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas -->
<!-- start class org.apache.hadoop.hdfs.server.blockmanagement.UnresolvedTopologyException -->
<class name="UnresolvedTopologyException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnresolvedTopologyException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[This exception is thrown if resolving topology path
for a node fails.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.blockmanagement.UnresolvedTopologyException -->
</package>
<package name="org.apache.hadoop.hdfs.server.common">
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState -->
<class name="HdfsServerConstants.BlockUCState" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[States that a block can go through while it is under construction.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState -->
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole -->
<class name="HdfsServerConstants.NamenodeRole" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Defines the NameNode role.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole -->
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType -->
<class name="HdfsServerConstants.NodeType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Type of the node]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType -->
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState -->
<class name="HdfsServerConstants.ReplicaState" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getValue" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="v" type="int"/>
</method>
<method name="read" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read from in]]>
</doc>
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Write to out]]>
</doc>
</method>
<doc>
<![CDATA[Block replica states that a replica can go through while being constructed.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState -->
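<!-- Illustrative round-trip sketch for the ReplicaState serialization helpers above.
     The FINALIZED constant is assumed to exist in this enum (constants are not listed in
     the JDiff output); the streams come from java.io and both calls declare IOException.

     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     HdfsServerConstants.ReplicaState.FINALIZED.write(new DataOutputStream(bytes));

     DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
     HdfsServerConstants.ReplicaState state = HdfsServerConstants.ReplicaState.read(in);
     HdfsServerConstants.ReplicaState same = HdfsServerConstants.ReplicaState.getState(state.getValue());
-->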
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption -->
<class name="HdfsServerConstants.RollingUpgradeStartupOption" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getOptionString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="matches" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="option" type="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption"/>
</method>
<method name="getAllOptionString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Startup options for rolling upgrade.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption -->
<!-- start class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption -->
<class name="HdfsServerConstants.StartupOption" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toNodeRole" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setClusterId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="cid" type="java.lang.String"/>
</method>
<method name="getClusterId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setRollingUpgradeStartupOption"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="opt" type="java.lang.String"/>
</method>
<method name="getRollingUpgradeStartupOption" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="createRecoveryContext" return="org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setForce"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="force" type="int"/>
</method>
<method name="getForce" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getForceFormat" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setForceFormat"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="force" type="boolean"/>
</method>
<method name="getInteractiveFormat" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setInteractiveFormat"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="interactive" type="boolean"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getEnum" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="value" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Startup options]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption -->
<!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
<class name="Storage.StorageState" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
</package>
<package name="org.apache.hadoop.hdfs.server.datanode">
<!-- start interface org.apache.hadoop.hdfs.server.datanode.BPServiceActorAction -->
<interface name="BPServiceActorAction" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="reportTo"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpNamenode" type="org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB"/>
<param name="bpRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
<exception name="BPServiceActorActionException" type="org.apache.hadoop.hdfs.server.datanode.BPServiceActorActionException"/>
</method>
<doc>
<![CDATA[Base interface for actions issued by the BPOfferService class to tell a
BPServiceActor to take certain actions.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.BPServiceActorAction -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.BPServiceActorActionException -->
<class name="BPServiceActorActionException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BPServiceActorActionException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.BPServiceActorActionException -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.CachingStrategy -->
<class name="CachingStrategy" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CachingStrategy" type="java.lang.Boolean, java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="newDefaultStrategy" return="org.apache.hadoop.hdfs.server.datanode.CachingStrategy"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="newDropBehind" return="org.apache.hadoop.hdfs.server.datanode.CachingStrategy"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDropBehind" return="java.lang.Boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getReadahead" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The caching strategy we should use for an HDFS read or write operation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.CachingStrategy -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.CachingStrategy.Builder -->
<class name="CachingStrategy.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder" type="org.apache.hadoop.hdfs.server.datanode.CachingStrategy"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setDropBehind" return="org.apache.hadoop.hdfs.server.datanode.CachingStrategy.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dropBehind" type="java.lang.Boolean"/>
</method>
<method name="setReadahead" return="org.apache.hadoop.hdfs.server.datanode.CachingStrategy.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="readahead" type="java.lang.Long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.server.datanode.CachingStrategy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.CachingStrategy.Builder -->
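  <!-- Editor's note (not part of the JDiff output): a minimal usage sketch of the
       CachingStrategy.Builder API listed above, based only on the signatures recorded
       here; the 4 MB readahead value is an arbitrary illustration.

       // Start from the default strategy, then enable drop-behind and set readahead.
       CachingStrategy prototype = CachingStrategy.newDefaultStrategy();
       CachingStrategy strategy = new CachingStrategy.Builder(prototype)
           .setDropBehind(true)
           .setReadahead(4L * 1024 * 1024)
           .build();
  -->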
<!-- start class org.apache.hadoop.hdfs.server.datanode.ChunkChecksum -->
<class name="ChunkChecksum" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ChunkChecksum" type="long, byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getDataLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getChecksum" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Holder class that holds checksum bytes and the length in a block at which
the checksum bytes end.
For example, if length = 1023 and the checksum is 4 bytes covering 512 bytes, then
the checksum applies to the last chunk, i.e. bytes 512 - 1023.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ChunkChecksum -->
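  <!-- Editor's note (not part of the JDiff output): a sketch of the ChunkChecksum
       holder using the example from its description; the 4-byte array stands in for
       the CRC of the last, partial 512-byte chunk of a 1023-byte block.

       byte[] lastChunkCrc = new byte[4];           // checksum bytes for the last chunk
       ChunkChecksum cc = new ChunkChecksum(1023L, lastChunkCrc);
       long dataLength = cc.getDataLength();        // 1023
       byte[] checksumBytes = cc.getChecksum();     // the 4 checksum bytes
  -->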
<!-- start class org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion.Feature -->
<class name="DataNodeLayoutVersion.Feature" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature"/>
<method name="values" return="org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion.Feature[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion.Feature"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Enums for features that change the layout version.
<br><br>
To add a new layout version:
<ul>
<li>Define a new enum constant with a short enum name, the new layout version
and description of the added feature.</li>
<li>When adding a layout version with an ancestor that is not same as
its immediate predecessor, use the constructor where a specific ancestor
can be passed.
</li>
</ul>]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion.Feature -->
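  <!-- Editor's note (not part of the JDiff output): a sketch that enumerates the
       datanode layout features via the generated values()/getInfo() methods listed
       above; it does not show how to define a new enum constant, since the enum
       constructor is not part of this listing.

       for (DataNodeLayoutVersion.Feature f : DataNodeLayoutVersion.Feature.values()) {
         LayoutVersion.FeatureInfo info = f.getInfo();
         System.out.println(f + " : " + info);
       }
  -->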
<!-- start interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
<interface name="DataNodeMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getVersion" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the version of Hadoop.
@return the version of Hadoop]]>
</doc>
</method>
<method name="getRpcPort" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the rpc port.
@return the rpc port]]>
</doc>
</method>
<method name="getHttpPort" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the http port.
@return the http port]]>
</doc>
</method>
<method name="getNamenodeAddresses" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the namenode IP addresses.
@return the namenode IP addresses that the datanode is talking to]]>
</doc>
</method>
<method name="getVolumeInfo" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the information of each volume on the Datanode. Please
see the implementation for the format of returned information.
@return the volume info]]>
</doc>
</method>
<method name="getClusterId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the cluster id.
@return the cluster id]]>
</doc>
</method>
<method name="getXceiverCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns an estimate of the number of Datanode threads
actively transferring blocks.]]>
</doc>
</method>
<method name="getDatanodeNetworkCounts" return="java.util.Map"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the network error counts on a per-Datanode basis.]]>
</doc>
</method>
<doc>
<![CDATA[This is the JMX management interface for data node information]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
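  <!-- Editor's note (not part of the JDiff output): a hedged sketch of reading a few
       DataNodeMXBean attributes over JMX. The object name
       "Hadoop:service=DataNode,name=DataNodeInfo", the host name and the JMX port
       are assumptions about a typical deployment, not facts recorded in this file.

       JMXServiceURL url = new JMXServiceURL(
           "service:jmx:rmi:///jndi/rmi://datanode-host:8006/jmxrmi");   // assumed port
       try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
         MBeanServerConnection conn = connector.getMBeanServerConnection();
         ObjectName name = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
         System.out.println("Version:   " + conn.getAttribute(name, "Version"));
         System.out.println("RpcPort:   " + conn.getAttribute(name, "RpcPort"));
         System.out.println("ClusterId: " + conn.getAttribute(name, "ClusterId"));
       }
  -->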
<!-- start class org.apache.hadoop.hdfs.server.datanode.ErrorReportAction -->
<class name="ErrorReportAction" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.datanode.BPServiceActorAction"/>
<constructor name="ErrorReportAction" type="int, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="reportTo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpNamenode" type="org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB"/>
<param name="bpRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
<exception name="BPServiceActorActionException" type="org.apache.hadoop.hdfs.server.datanode.BPServiceActorActionException"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
<![CDATA[An ErrorReportAction is an instruction issued by BPOfferService to
BPServiceActor to report an error, described by errorMessage, to the namenode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ErrorReportAction -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.FinalizedReplica -->
<class name="FinalizedReplica" extends="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FinalizedReplica" type="long, long, long, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param blockId block id
@param len replica length
@param genStamp replica generation stamp
@param vol volume where replica is located
@param dir directory path where block and meta files are located]]>
</doc>
</constructor>
<constructor name="FinalizedReplica" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param block a block
@param vol volume where replica is located
@param dir directory path where block and meta files are located]]>
</doc>
</constructor>
<constructor name="FinalizedReplica" type="org.apache.hadoop.hdfs.server.datanode.FinalizedReplica"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Copy constructor.
@param from where to copy construct from]]>
</doc>
</constructor>
<method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isUnlinked" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setUnlinked"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesOnDisk" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class describes a replica that has been finalized.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.FinalizedReplica -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException -->
<class name="ReplicaAlreadyExistsException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicaAlreadyExistsException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ReplicaAlreadyExistsException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Exception indicating that the target block already exists
and is not set to be recovered/overwritten.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten -->
<class name="ReplicaBeingWritten" extends="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicaBeingWritten" type="long, long, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor for a zero length replica
@param blockId block id
@param genStamp replica generation stamp
@param vol volume where replica is located
@param dir directory path where block and meta files are located
@param bytesToReserve disk space to reserve for this replica, based on
the estimated maximum block length.]]>
</doc>
</constructor>
<constructor name="ReplicaBeingWritten" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File, java.lang.Thread"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param block a block
@param vol volume where replica is located
@param dir directory path where block and meta files are located
@param writer a thread that is writing to this replica]]>
</doc>
</constructor>
<constructor name="ReplicaBeingWritten" type="long, long, long, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File, java.lang.Thread, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param blockId block id
@param len replica length
@param genStamp replica generation stamp
@param vol volume where replica is located
@param dir directory path where block and meta files are located
@param writer a thread that is writing to this replica
@param bytesToReserve disk space to reserve for this replica, based on
the estimated maximum block length.]]>
</doc>
</constructor>
<constructor name="ReplicaBeingWritten" type="org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Copy constructor.
@param from where to copy from]]>
</doc>
</constructor>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class represents replicas being written.
Those are the replicas that
are created in a pipeline initiated by a dfs client.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaHandler -->
<class name="ReplicaHandler" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<constructor name="ReplicaHandler" type="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class includes a replica being actively written and the reference to
the fs volume where this replica is located.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaHandler -->
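  <!-- Editor's note (not part of the JDiff output): ReplicaHandler is Closeable, so a
       try-with-resources block releases the volume reference when writing finishes.
       'replica', 'volumeRef' and 'bytesReceived' are assumed to come from the
       surrounding dataset implementation.

       try (ReplicaHandler handler = new ReplicaHandler(replica, volumeRef)) {
         ReplicaInPipelineInterface r = handler.getReplica();
         r.setNumBytes(bytesReceived);   // e.g. update the received byte count while writing
       }
  -->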
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.ReplicaDirInfo -->
<class name="ReplicaInfo.ReplicaDirInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicaDirInfo" type="java.lang.String, boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<field name="baseDirPath" type="java.lang.String"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
<field name="hasSubidrs" type="boolean"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.ReplicaDirInfo -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline -->
<class name="ReplicaInPipeline" extends="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"/>
<constructor name="ReplicaInPipeline" type="long, long, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor for a zero length replica
@param blockId block id
@param genStamp replica generation stamp
@param vol volume where replica is located
@param dir directory path where block and meta files are located
@param bytesToReserve disk space to reserve for this replica, based on
the estimated maximum block length.]]>
</doc>
</constructor>
<constructor name="ReplicaInPipeline" type="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Copy constructor.
@param from where to copy from]]>
</doc>
</constructor>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesAcked" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setBytesAcked"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesAcked" type="long"/>
</method>
<method name="getBytesOnDisk" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesReserved" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="releaseAllBytesReserved"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setLastChecksumAndDataLen"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dataLength" type="long"/>
<param name="lastChecksum" type="byte[]"/>
</method>
<method name="getLastChecksumAndDataLen" return="org.apache.hadoop.hdfs.server.datanode.ChunkChecksum"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setWriter"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="writer" type="java.lang.Thread"/>
<doc>
<![CDATA[Set the thread that is writing to this replica
@param writer a thread writing to this replica]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="stopWriter"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="xceiverStopTimeout" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Interrupt the writing thread and wait until it dies
@throws IOException the waiting is interrupted]]>
</doc>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="createStreams" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="isCreate" type="boolean"/>
<param name="requestedChecksum" type="org.apache.hadoop.util.DataChecksum"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class defines a replica in a pipeline, which
includes a persistent replica being written to by a dfs client or
a temporary replica being replicated by a source datanode or
being copied for balancing purposes.
The base class implements a temporary replica.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline -->
<!-- start interface org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface -->
<interface name="ReplicaInPipelineInterface" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.datanode.Replica"/>
<method name="setNumBytes"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesReceived" type="long"/>
<doc>
<![CDATA[Set the number of bytes received
@param bytesReceived number of bytes received]]>
</doc>
</method>
<method name="getBytesAcked" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the number of bytes acked
@return the number of bytes acked]]>
</doc>
</method>
<method name="setBytesAcked"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesAcked" type="long"/>
<doc>
<![CDATA[Set the number of bytes that have been acked
@param bytesAcked number of bytes acked]]>
</doc>
</method>
<method name="releaseAllBytesReserved"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Release any disk space reserved for this replica.]]>
</doc>
</method>
<method name="setLastChecksumAndDataLen"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dataLength" type="long"/>
<param name="lastChecksum" type="byte[]"/>
<doc>
<![CDATA[store the checksum for the last chunk along with the data length
@param dataLength number of bytes on disk
@param lastChecksum - checksum bytes for the last chunk]]>
</doc>
</method>
<method name="getLastChecksumAndDataLen" return="org.apache.hadoop.hdfs.server.datanode.ChunkChecksum"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[gets the last chunk checksum and the length of the block corresponding
to that checksum]]>
</doc>
</method>
<method name="createStreams" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="isCreate" type="boolean"/>
<param name="requestedChecksum" type="org.apache.hadoop.util.DataChecksum"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create output streams for writing to this replica,
one for block file and one for CRC file
@param isCreate if it is for creation
@param requestedChecksum the checksum the writer would prefer to use
@return output streams for writing
@throws IOException if any error occurs]]>
</doc>
</method>
<doc>
<![CDATA[This defines the interface of a replica in a pipeline that is being written to.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
<class name="ReplicaNotFoundException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicaNotFoundException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ReplicaNotFoundException" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ReplicaNotFoundException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<field name="NON_RBW_REPLICA" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="UNFINALIZED_REPLICA" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="UNFINALIZED_AND_NONRBW_REPLICA" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="NON_EXISTENT_REPLICA" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="UNEXPECTED_GS_REPLICA" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Exception indicating that DataNode does not have a replica
that matches the target block.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery -->
<class name="ReplicaUnderRecovery" extends="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicaUnderRecovery" type="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ReplicaUnderRecovery" type="org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Copy constructor.
@param from where to copy from]]>
</doc>
</constructor>
<method name="getRecoveryID" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the recovery id
@return the generation stamp that the replica will be bumped to]]>
</doc>
</method>
<method name="setRecoveryID"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="recoveryId" type="long"/>
<doc>
<![CDATA[Set the recovery id
@param recoveryId the new recoveryId]]>
</doc>
</method>
<method name="getOriginalReplica" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the original replica that's under recovery
@return the original replica under recovery]]>
</doc>
</method>
<method name="isUnlinked" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setUnlinked"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesOnDisk" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setBlockId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="long"/>
</method>
<method name="setGenerationStamp"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="gs" type="long"/>
</method>
<method name="setNumBytes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="numBytes" type="long"/>
</method>
<method name="setDir"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="java.io.File"/>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="createInfo" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class represents replicas that are under block recovery.
It has a recovery id that is equal to the generation stamp
that the replica will be bumped to after recovery.
The recovery id is used to handle multiple concurrent block recoveries.
A recovery with a higher recovery id preempts recoveries with a lower id.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered -->
<class name="ReplicaWaitingToBeRecovered" extends="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicaWaitingToBeRecovered" type="long, long, long, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param blockId block id
@param len replica length
@param genStamp replica generation stamp
@param vol volume where replica is located
@param dir directory path where block and meta files are located]]>
</doc>
</constructor>
<constructor name="ReplicaWaitingToBeRecovered" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi, java.io.File"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param block a block
@param vol volume where replica is located
@param dir directory path where block and meta files are located]]>
</doc>
</constructor>
<constructor name="ReplicaWaitingToBeRecovered" type="org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Copy constructor.
@param from where to copy from]]>
</doc>
</constructor>
<method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isUnlinked" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setUnlinked"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBytesOnDisk" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[This class represents a replica that is waiting to be recovered.
After a datanode restart, any replica in the "rbw" directory is loaded
as a replica waiting to be recovered.
A replica waiting to be recovered neither serves reads nor
participates in any pipeline recovery. It will either become outdated if its
client continues to write to it, or be recovered as a result of
lease recovery.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ReportBadBlockAction -->
<class name="ReportBadBlockAction" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.datanode.BPServiceActorAction"/>
<constructor name="ReportBadBlockAction" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock, java.lang.String, org.apache.hadoop.fs.StorageType"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="reportTo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpNamenode" type="org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB"/>
<param name="bpRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
<exception name="BPServiceActorActionException" type="org.apache.hadoop.hdfs.server.datanode.BPServiceActorActionException"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
<![CDATA[ReportBadBlockAction is an instruction issued by BPOfferService to
BPServiceActor to report a bad block to the namenode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ReportBadBlockAction -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter -->
<class name="SecureDataNodeStarter" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.commons.daemon.Daemon"/>
<constructor name="SecureDataNodeStarter"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="init"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="context" type="org.apache.commons.daemon.DaemonContext"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="start"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="destroy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="stop"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="getSecureResources" return="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="Exception" type="java.lang.Exception"/>
<doc>
<![CDATA[Acquire privileged resources (i.e., the privileged ports) for the data
node. The privileged resources consist of the port of the RPC server and
the port of the HTTP (not HTTPS) server.]]>
</doc>
</method>
<doc>
<![CDATA[Utility class to start a datanode in a secure cluster, first obtaining
privileged resources before main startup and handing them to the datanode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources -->
<class name="SecureDataNodeStarter.SecureResources" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SecureResources" type="java.net.ServerSocket, java.nio.channels.ServerSocketChannel"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStreamingSocket" return="java.net.ServerSocket"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHttpServerChannel" return="java.nio.channels.ServerSocketChannel"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Stash resources needed for datanode operation in a secure environment.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry -->
<class name="ShortCircuitRegistry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ShortCircuitRegistry" type="org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="removeShm"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="shm" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm"/>
</method>
<method name="processBlockMlockEvent"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
<doc>
<![CDATA[Process a block mlock event from the FsDatasetCache.
@param blockId The block that was mlocked.]]>
</doc>
</method>
<method name="processBlockMunlockRequest" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
<doc>
<![CDATA[Mark any slots associated with this blockId as unanchorable.
@param blockId The block ID.
@return True if we should allow the munlock request.]]>
</doc>
</method>
<method name="processBlockInvalidation"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
<doc>
<![CDATA[Invalidate any slot associated with a blockId that we are invalidating
(deleting) from this DataNode. When a slot is invalid, the DFSClient will
not use the corresponding replica for new read or mmap operations (although
existing, ongoing read or mmap operations will complete.)
@param blockId The block ID.]]>
</doc>
</method>
<method name="getClientNames" return="java.lang.String"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
</method>
<method name="createNewMemorySegment" return="org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="clientName" type="java.lang.String"/>
<param name="sock" type="org.apache.hadoop.net.unix.DomainSocket"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Handle a DFSClient request to create a new memory segment.
@param clientName Client name as reported by the client.
@param sock The DomainSocket to associate with this memory
segment. When this socket is closed, or the
other side writes anything to the socket, the
segment will be closed. This can happen at any
time, including right after this function returns.
@return A NewShmInfo object. The caller must close the
NewShmInfo object once they are done with it.
@throws IOException If the new memory segment could not be created.]]>
</doc>
</method>
<method name="registerSlot"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
<param name="slotId" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId"/>
<param name="isCached" type="boolean"/>
<exception name="InvalidRequestException" type="org.apache.hadoop.fs.InvalidRequestException"/>
</method>
<method name="unregisterSlot"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="slotId" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId"/>
<exception name="InvalidRequestException" type="org.apache.hadoop.fs.InvalidRequestException"/>
</method>
<method name="shutdown"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="visit"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="visitor" type="org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.Visitor"/>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Manages client short-circuit memory segments on the DataNode.
DFSClients request shared memory segments from the DataNode. The
ShortCircuitRegistry generates and manages these segments. Each segment
has a randomly generated 128-bit ID which uniquely identifies it. The
segments each contain several "slots."
Before performing a short-circuit read, DFSClients must request a pair of
file descriptors from the DataNode via the REQUEST_SHORT_CIRCUIT_FDS
operation. As part of this operation, DFSClients pass the ID of the shared
memory segment they would like to use to communicate information about this
replica, as well as the slot number within that segment they would like to
use. Slot allocation is always done by the client.
Slots are used to track the state of the block on both the client and the
datanode. When this DataNode mlocks a block, the corresponding slots for the
replicas are marked as "anchorable". Anchorable blocks can be safely read
without verifying the checksum. This means that BlockReaderLocal objects
using these replicas can skip checksumming. It also means that we can do
zero-copy reads on these replicas (the ZCR interface has no way of
verifying checksums.)
When a DN needs to munlock a block, it needs to first wait for the block to
be unanchored by clients doing a no-checksum read or a zero-copy read. The
DN also marks the block's slots as "unanchorable" to prevent additional
clients from initiating these operations in the future.
The counterpart of this class on the client is {@link DfsClientShmManager}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry -->
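  <!-- Editor's note (not part of the JDiff output): a sketch of inspecting the
       registry's shared-memory segments and slots through its Visitor callback.
       'registry' is an assumed ShortCircuitRegistry instance, and generic type
       parameters are omitted because this listing only records the raw types of
       accept().

       registry.visit(new ShortCircuitRegistry.Visitor() {
         public void accept(HashMap segments, HashMultimap slots) {
           System.out.println("shared-memory segments: " + segments.size()
               + ", registered slots: " + slots.size());
         }
       });
  -->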
<!-- start class org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo -->
<class name="ShortCircuitRegistry.NewShmInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="shmId" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="stream" type="java.io.FileInputStream"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.RegisteredShm -->
<class name="ShortCircuitRegistry.RegisteredShm" extends="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.net.unix.DomainSocketWatcher.Handler"/>
<method name="handle" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="sock" type="org.apache.hadoop.net.unix.DomainSocket"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.RegisteredShm -->
<!-- start interface org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.Visitor -->
<interface name="ShortCircuitRegistry.Visitor" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="accept"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="segments" type="java.util.HashMap"/>
<param name="slots" type="com.google.common.collect.HashMultimap"/>
</method>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.Visitor -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException -->
<class name="UnexpectedReplicaStateException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnexpectedReplicaStateException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="UnexpectedReplicaStateException" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock, org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="UnexpectedReplicaStateException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Exception indicating that the replica is in an unexpected state]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.VolumeScanner -->
<class name="VolumeScanner" extends="java.lang.Thread"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="printStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="java.lang.StringBuilder"/>
</method>
<method name="run"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="shutdown"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Shut down this scanner.]]>
</doc>
</method>
<method name="markSuspectBlock"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
</method>
<method name="enableBlockPoolId"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<doc>
<![CDATA[Allow the scanner to scan the given block pool.
@param bpid The block pool id.]]>
</doc>
</method>
<method name="disableBlockPoolId"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<doc>
<![CDATA[Disallow the scanner from scanning the given block pool.
@param bpid The block pool id.]]>
</doc>
</method>
<field name="LOG" type="org.slf4j.Logger"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[VolumeScanner scans a single volume. Each VolumeScanner has its own thread.<p/>
They are all managed by the DataNode's BlockScanner.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.VolumeScanner -->
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy -->
<class name="AvailableSpaceVolumeChoosingPolicy" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy"/>
<implements name="org.apache.hadoop.conf.Configurable"/>
<constructor name="AvailableSpaceVolumeChoosingPolicy"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setConf"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
</method>
<method name="getConf" return="org.apache.hadoop.conf.Configuration"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="chooseVolume" return="V"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="volumes" type="java.util.List"/>
<param name="replicaSize" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[A DN volume choosing policy which takes into account the amount of free
space on each of the available volumes when considering where to assign a
new replica allocation. By default this policy prefers assigning replicas to
those volumes with more available free space, so as to over time balance the
available space of all the volumes within a DN.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy -->
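  <!-- Editor's note (not part of the JDiff output): a sketch of using the policy
       directly; 'volumes' is an assumed List of candidate FsVolumeSpi objects
       supplied by the dataset, and 128 MB is an arbitrary replica size.

       AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
           new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>();
       policy.setConf(new Configuration());
       FsVolumeSpi chosen = policy.chooseVolume(volumes, 128L * 1024 * 1024);
  -->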
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.Factory -->
<class name="FsDatasetSpi.Factory" extends="java.lang.Object"
abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Factory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getFactory" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.Factory"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[@return the configured factory.]]>
</doc>
</method>
<method name="newInstance" return="D"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="datanode" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
<param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new object.]]>
</doc>
</method>
<method name="isSimulated" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Does the factory create simulated objects?]]>
</doc>
</method>
<doc>
<![CDATA[A factory for creating {@link FsDatasetSpi} objects.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.Factory -->
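  <!-- Editor's note (not part of the JDiff output): a sketch of the factory lookup
       and instantiation flow shown above; 'datanode', 'storage' and 'conf' are
       assumed to be provided by the surrounding DataNode startup code.

       FsDatasetSpi.Factory<?> factory = FsDatasetSpi.Factory.getFactory(conf);
       FsDatasetSpi<?> dataset = factory.newInstance(datanode, storage, conf);
       boolean simulated = factory.isSimulated();   // true only for simulated/test datasets
  -->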
<!-- start interface org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference -->
<interface name="FsVolumeReference" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<method name="close"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Decrease the reference count of the volume.
@throws IOException it never throws IOException.]]>
</doc>
</method>
<method name="getVolume" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns the underlying volume object]]>
</doc>
</method>
<doc>
<![CDATA[This is the interface for holding a reference count as an AutoCloseable resource.
It increases the reference count by one in the constructor, and decreases
the reference count by one in {@link #close()}.
<pre>
{@code
try (FsVolumeReference ref = volume.obtainReference()) {
// Do IOs on the volume
volume.createRwb(...);
...
}
}
</pre>]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference -->
<!-- start interface org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi -->
<interface name="FsVolumeSpi" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="obtainReference" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="ClosedChannelException" type="java.nio.channels.ClosedChannelException"/>
<doc>
<![CDATA[Obtain a reference object that has increased the reference count of the
volume by one.
It is the caller's responsibility to close the {@link FsVolumeReference} to decrease
the reference count on the volume.]]>
</doc>
</method>
<method name="getStorageID" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the StorageUuid of the volume]]>
</doc>
</method>
<method name="getBlockPoolList" return="java.lang.String[]"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return a list of block pools.]]>
</doc>
</method>
<method name="getAvailable" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@return the available storage space in bytes.]]>
</doc>
</method>
<method name="getBasePath" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the base path to the volume]]>
</doc>
</method>
<method name="getPath" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@return the path to the volume]]>
</doc>
</method>
<method name="getFinalizedDir" return="java.io.File"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@return the directory for the finalized blocks in the block pool.]]>
</doc>
</method>
<method name="getStorageType" return="org.apache.hadoop.fs.StorageType"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="reserveSpaceForRbw"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesToReserve" type="long"/>
<doc>
<![CDATA[Reserve disk space for an RBW block so a writer does not run out of
space before the block is full.]]>
</doc>
</method>
<method name="releaseReservedSpace"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesToRelease" type="long"/>
<doc>
<![CDATA[Release disk space previously reserved for RBW block.]]>
</doc>
</method>
<method name="isTransientStorage" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns true if the volume is NOT backed by persistent storage.]]>
</doc>
</method>
<method name="newBlockIterator" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<param name="name" type="java.lang.String"/>
<doc>
<![CDATA[Create a new block iterator. It will start at the beginning of the
block set.
@param bpid The block pool id to iterate over.
@param name The name of the block iterator to create.
@return The new block iterator.]]>
</doc>
</method>
<method name="loadBlockIterator" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bpid" type="java.lang.String"/>
<param name="name" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load a saved block iterator.
@param bpid The block pool id to iterate over.
@param name The name of the block iterator to load.
@return The saved block iterator.
@throws IOException If there was an IO error loading the saved
block iterator.]]>
</doc>
</method>
<method name="getDataset" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the FSDatasetSpi which this volume is a part of.]]>
</doc>
</method>
<doc>
<![CDATA[This is an interface for the underlying volume.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi -->
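<!-- Usage sketch (illustrative): holding a reference to a volume while querying it,
     using only obtainReference(), getVolume() and getAvailable() as documented
     above. The helper class and method names are hypothetical.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
     import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

     class VolumeReferenceExample {
       static long availableWhileReferenced(FsVolumeSpi volume) throws IOException {
         // obtainReference() bumps the volume's reference count; close() releases it.
         try (FsVolumeReference ref = volume.obtainReference()) {
           FsVolumeSpi v = ref.getVolume();
           return v.getAvailable();  // safe to use the volume while the reference is held
         }
       }
     }
-->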
<!-- start interface org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator -->
<interface name="FsVolumeSpi.BlockIterator" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<method name="nextBlock" return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the next block.<p/>
Note that this block may be removed in between the time we list it,
and the time the caller tries to use it, or it may represent a stale
entry. Callers should handle the case where the returned block no
longer exists.
@return The next block, or null if there are no
more blocks. Null if there was an error
determining the next block.
@throws IOException If there was an error getting the next block in
this volume. In this case, EOF will be set on
the iterator.]]>
</doc>
</method>
<method name="atEnd" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns true if we got to the end of the block pool.]]>
</doc>
</method>
<method name="rewind"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Repositions the iterator at the beginning of the block pool.]]>
</doc>
</method>
<method name="save"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save this block iterator to the underlying volume.
Any existing saved block iterator with this name will be overwritten.
maxStalenessMs will not be saved.
@throws IOException If there was an error when saving the block
iterator.]]>
</doc>
</method>
<method name="setMaxStalenessMs"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="maxStalenessMs" type="long"/>
<doc>
<![CDATA[Set the maximum staleness of entries that we will return.<p/>
A maximum staleness of 0 means we will never return stale entries; a
larger value will allow us to reduce resource consumption in exchange
for returning more potentially stale entries. Even with staleness set
to 0, consumers of this API must handle race conditions where blocks
disappear before they can be processed.]]>
</doc>
</method>
<method name="getIterStartMs" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the wall-clock time, measured in milliseconds since the Epoch,
when this iterator was created.]]>
</doc>
</method>
<method name="getLastSavedMs" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the wall-clock time, measured in milliseconds since the Epoch,
when this iterator was last saved. Returns iterStartMs if the
iterator was never saved.]]>
</doc>
</method>
<method name="getBlockPoolId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the id of the block pool which this iterator traverses.]]>
</doc>
</method>
<doc>
<![CDATA[BlockIterator will return ExtendedBlock entries from a block pool in
this volume. The entries will be returned in sorted order.<p/>
BlockIterator objects themselves do not always have internal
synchronization, so they can only safely be used by a single thread at a
time.<p/>
Closing the iterator does not save it. You must call save to save it.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator -->
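<!-- Usage sketch (illustrative): scanning one block pool of a volume with the
     BlockIterator documented above. The iterator name "example-scan", the helper
     class and the bpid parameter are placeholders.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
     import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

     class BlockIteratorExample {
       static long countBlocks(FsVolumeSpi volume, String bpid) throws IOException {
         long count = 0;
         try (FsVolumeSpi.BlockIterator it =
             volume.newBlockIterator(bpid, "example-scan")) {
           while (!it.atEnd()) {
             ExtendedBlock block = it.nextBlock();
             if (block == null) {
               continue;  // end of set or an error; atEnd() reflects either case
             }
             count++;     // the block may already be gone; callers must tolerate that
           }
           it.save();     // closing does not persist the cursor, save() does
         }
         return count;
       }
     }
-->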
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream -->
<class name="LengthInputStream" extends="java.io.FilterInputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="LengthInputStream" type="java.io.InputStream, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create a stream.
@param in the underlying input stream.
@param length the length of the stream.]]>
</doc>
</constructor>
<method name="getLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the length.]]>
</doc>
</method>
<method name="getWrappedStream" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[An input stream with length.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams -->
<class name="ReplicaInputStreams" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<constructor name="ReplicaInputStreams" type="java.io.InputStream, java.io.InputStream, org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create an object with a data input stream and a checksum input stream.]]>
</doc>
</constructor>
<method name="getDataIn" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the data input stream.]]>
</doc>
</method>
<method name="getChecksumIn" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the checksum input stream.]]>
</doc>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Contains the input streams for the data and checksum of a replica.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams -->
<class name="ReplicaOutputStreams" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<constructor name="ReplicaOutputStreams" type="java.io.OutputStream, java.io.OutputStream, org.apache.hadoop.util.DataChecksum, boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create an object with a data output stream, a checksum output stream
and a checksum.]]>
</doc>
</constructor>
<method name="getDataOut" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the data output stream.]]>
</doc>
</method>
<method name="getChecksumOut" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the checksum output stream.]]>
</doc>
</method>
<method name="getChecksum" return="org.apache.hadoop.util.DataChecksum"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the checksum.]]>
</doc>
</method>
<method name="isTransientStorage" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return whether this is writing to transient storage.]]>
</doc>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="syncDataOut"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Sync the data stream if it supports it.]]>
</doc>
</method>
<method name="syncChecksumOut"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Sync the checksum stream if it supports it.]]>
</doc>
</method>
<doc>
<![CDATA[Contains the output streams for the data and checksum of a replica.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams -->
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy -->
<class name="RoundRobinVolumeChoosingPolicy" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy"/>
<constructor name="RoundRobinVolumeChoosingPolicy"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="chooseVolume" return="V"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="volumes" type="java.util.List"/>
<param name="blockSize" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Choose volumes in round-robin order.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy -->
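<!-- Usage sketch (illustrative): a simplified round-robin choice like the policy
     documented above, skipping volumes that cannot fit the block. The class and
     field names are hypothetical; this is not the actual Hadoop implementation.

     import java.io.IOException;
     import java.util.List;
     import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

     class RoundRobinExample {
       private int next = 0;  // index of the volume to try first on the next call

       synchronized FsVolumeSpi choose(List<? extends FsVolumeSpi> volumes,
           long blockSize) throws IOException {
         if (volumes.isEmpty()) {
           throw new IOException("No volumes available");
         }
         next = next % volumes.size();
         int start = next;
         do {
           FsVolumeSpi v = volumes.get(next);
           next = (next + 1) % volumes.size();  // advance the round-robin cursor
           if (v.getAvailable() >= blockSize) {
             return v;
           }
         } while (next != start);
         throw new IOException("Out of space: no volume can fit " + blockSize + " bytes");
       }
     }
-->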
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder -->
<class name="FsDatasetCache.PageRounder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="PageRounder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="round" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="count" type="long"/>
<doc>
<![CDATA[Round up a number to a multiple of the operating system page size.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder -->
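<!-- Worked example (illustrative): the usual arithmetic for rounding a byte count
     up to a multiple of a page size, as round(long) above does for the operating
     system page size. The 4096-byte page in the comment is only an example; the
     real class asks the OS for its page size.

     class PageRoundingExample {
       static long roundUp(long count, long pageSize) {
         // e.g. roundUp(5000, 4096) == 8192 and roundUp(4096, 4096) == 4096
         return ((count + pageSize - 1) / pageSize) * pageSize;
       }
     }
-->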
<!-- start class org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory -->
<class name="FsDatasetFactory" extends="org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.Factory"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FsDatasetFactory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="newInstance" return="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="datanode" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
<param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[A factory for creating {@link FsDatasetImpl} objects.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory -->
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.web">
<!-- start class org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer -->
<class name="DatanodeHttpServer" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<constructor name="DatanodeHttpServer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.datanode.DataNode, java.nio.channels.ServerSocketChannel"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getHttpAddress" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHttpsAddress" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="start"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer -->
</package>
<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
<!-- start class org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler -->
<class name="WebHdfsHandler" extends="io.netty.channel.SimpleChannelInboundHandler"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="WebHdfsHandler" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="channelRead0"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ctx" type="io.netty.channel.ChannelHandlerContext"/>
<param name="req" type="io.netty.handler.codec.http.HttpRequest"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="handle"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ctx" type="io.netty.channel.ChannelHandlerContext"/>
<param name="req" type="io.netty.handler.codec.http.HttpRequest"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="URISyntaxException" type="java.net.URISyntaxException"/>
</method>
<method name="exceptionCaught"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ctx" type="io.netty.channel.ChannelHandlerContext"/>
<param name="cause" type="java.lang.Throwable"/>
</method>
<field name="WEBHDFS_PREFIX" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="WEBHDFS_PREFIX_LENGTH" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="APPLICATION_OCTET_STREAM" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="APPLICATION_JSON_UTF8" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler -->
</package>
<package name="org.apache.hadoop.hdfs.server.mover">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode">
<!-- start class org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat -->
<class name="AclEntryStatusFormat" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="toInt" return="int[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="aclEntries" type="java.util.List"/>
</method>
<method name="toAclEntries" return="com.google.common.collect.ImmutableList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="entries" type="int[]"/>
</method>
<doc>
<![CDATA[Class to pack an AclEntry into an integer. <br>
An ACL entry is represented by a 32-bit integer in Big Endian format. <br>
The bits can be divided into five segments: <br>
[0:1) || [1:3) || [3:6) || [6:7) || [7:32) <br>
<br>
[0:1) -- the scope of the entry (AclEntryScope) <br>
[1:3) -- the type of the entry (AclEntryType) <br>
[3:6) -- the permission of the entry (FsAction) <br>
[6:7) -- a flag indicating whether this is a named entry <br>
[7:32) -- the name of the entry, which is an ID that points to a <br>
string in the StringTableSection. <br>]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.AclEntryStatusFormat -->
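<!-- Worked example (illustrative): packing the five segments described above into
     a single int with shifts and masks. Bit positions are counted from the
     least-significant bit purely for this sketch; the class itself defines the
     actual layout, and this is not the Hadoop implementation.

     class AclEntryPackingExample {
       static int pack(int scope, int type, int permission, boolean named, int nameId) {
         int entry = 0;
         entry |= (scope & 0x1);              // [0:1)  scope (AclEntryScope)
         entry |= (type & 0x3) << 1;          // [1:3)  type (AclEntryType)
         entry |= (permission & 0x7) << 3;    // [3:6)  permission (FsAction)
         entry |= (named ? 1 : 0) << 6;       // [6:7)  named-entry flag
         entry |= (nameId & 0x1FFFFFF) << 7;  // [7:32) id of the name string
         return entry;
       }
     }
-->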
<!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
<interface name="AuditLogger" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="initialize"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Called during initialization of the logger.
@param conf The configuration object.]]>
</doc>
</method>
<method name="logAuditEvent"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="succeeded" type="boolean"/>
<param name="userName" type="java.lang.String"/>
<param name="addr" type="java.net.InetAddress"/>
<param name="cmd" type="java.lang.String"/>
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
<doc>
<![CDATA[Called to log an audit event.
<p>
This method must return as quickly as possible, since it's called
in a critical section of the NameNode's operation.
@param succeeded Whether authorization succeeded.
@param userName Name of the user executing the request.
@param addr Remote address of the request.
@param cmd The requested command.
@param src Path of affected source file.
@param dst Path of affected destination file (if any).
@param stat File information for operations that change the file's
metadata (permissions, owner, times, etc).]]>
</doc>
</method>
<doc>
<![CDATA[Interface defining an audit logger.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
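<!-- Implementation sketch (illustrative): a minimal AuditLogger that writes one log
     line per event. The class name and log format are hypothetical; real deployments
     plug implementations in through NameNode configuration.

     import java.net.InetAddress;
     import org.apache.commons.logging.Log;
     import org.apache.commons.logging.LogFactory;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileStatus;
     import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

     public class SimpleAuditLogger implements AuditLogger {
       private static final Log LOG = LogFactory.getLog(SimpleAuditLogger.class);

       @Override
       public void initialize(Configuration conf) {
         // Nothing to set up in this sketch; real loggers may read their own keys here.
       }

       @Override
       public void logAuditEvent(boolean succeeded, String userName, InetAddress addr,
           String cmd, String src, String dst, FileStatus stat) {
         // Keep this fast: it runs in a critical section of the NameNode (see doc above).
         LOG.info("audit: allowed=" + succeeded + " user=" + userName + " addr=" + addr
             + " cmd=" + cmd + " src=" + src + " dst=" + dst);
       }
     }
-->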
<!-- start class org.apache.hadoop.hdfs.server.namenode.CacheManager.PersistState -->
<class name="CacheManager.PersistState" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="PersistState" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection, java.util.List, java.util.List"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<field name="section" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="pools" type="java.util.List"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="directives" type="java.util.List"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.CacheManager.PersistState -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.CachePool.DirectiveList -->
<class name="CachePool.DirectiveList" extends="org.apache.hadoop.util.IntrusiveCollection"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="getCachePool" return="org.apache.hadoop.hdfs.server.namenode.CachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.CachePool.DirectiveList -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet -->
<class name="CancelDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CancelDelegationTokenServlet"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="doGet"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="req" type="javax.servlet.http.HttpServletRequest"/>
<param name="resp" type="javax.servlet.http.HttpServletResponse"/>
<exception name="ServletException" type="javax.servlet.ServletException"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="PATH_SPEC" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="TOKEN" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Cancel delegation tokens over http for use in hftp.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.Content -->
<class name="Content" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.Content[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.Content"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[The content types such as file, directory and symlink to be computed.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.Content -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.Content.Counts -->
<class name="Content.Counts" extends="org.apache.hadoop.hdfs.util.EnumCounters"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="newInstance" return="org.apache.hadoop.hdfs.server.namenode.Content.Counts"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Content counts.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.Content.Counts -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.Content.CountsMap -->
<class name="Content.CountsMap" extends="org.apache.hadoop.hdfs.util.EnumCounters.Map"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[A map of counters for the current state and the snapshots.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.Content.CountsMap -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key -->
<class name="Content.CountsMap.Key" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[The key type of the map.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.ContentCounts -->
<class name="ContentCounts" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getFileCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDirectoryCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSymlinkCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStoragespace" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshotCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshotableDirectoryCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTypeSpaces" return="long[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTypeSpace" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="t" type="org.apache.hadoop.fs.StorageType"/>
</method>
<method name="addContent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="org.apache.hadoop.hdfs.server.namenode.Content"/>
<param name="val" type="long"/>
</method>
<method name="addContents"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.server.namenode.ContentCounts"/>
</method>
<method name="addTypeSpace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="t" type="org.apache.hadoop.fs.StorageType"/>
<param name="val" type="long"/>
</method>
<method name="addTypeSpaces"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumCounters"/>
</method>
<doc>
<![CDATA[The counter to be computed for content types such as file, directory and symlink,
and the storage type usage such as SSD, DISK, ARCHIVE.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.ContentCounts -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder -->
<class name="ContentCounts.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="file" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="long"/>
</method>
<method name="directory" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="directory" type="long"/>
</method>
<method name="symlink" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="symlink" type="long"/>
</method>
<method name="length" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="length" type="long"/>
</method>
<method name="storagespace" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storagespace" type="long"/>
</method>
<method name="snapshot" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshot" type="long"/>
</method>
<method name="snapshotable_directory" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotable_directory" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.server.namenode.ContentCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.ContentCounts.Builder -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.DefaultINodeAttributesProvider -->
<class name="DefaultINodeAttributesProvider" extends="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DefaultINodeAttributesProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="start"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="stop"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pathElements" type="java.lang.String[]"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
</method>
<field name="DEFAULT_PROVIDER" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider"
transient="false" volatile="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A default implementation of the INodeAttributeProvider]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.DefaultINodeAttributesProvider -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature -->
<class name="DirectoryWithQuotaFeature" extends="java.lang.Object"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.INode.Feature"/>
<method name="addSpaceConsumed2Cache"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="delta" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
<doc>
<![CDATA[Update the space/namespace/type usage of the tree
@param delta the change of the namespace/space/type usage]]>
</doc>
</method>
<method name="getSpaceConsumed" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the namespace and storagespace and typespace consumed.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="DEFAULT_NAMESPACE_QUOTA" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DEFAULT_STORAGE_SPACE_QUOTA" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Quota feature for {@link INodeDirectory}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.Builder -->
<class name="DirectoryWithQuotaFeature.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="nameSpaceQuota" return="org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="nameSpaceQuota" type="long"/>
</method>
<method name="storageSpaceQuota" return="org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="spaceQuota" type="long"/>
</method>
<method name="typeQuotas" return="org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="typeQuotas" type="org.apache.hadoop.hdfs.util.EnumCounters"/>
</method>
<method name="typeQuota" return="org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
<param name="quota" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature.Builder -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector -->
<class name="EncryptionFaultInjector" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EncryptionFaultInjector"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInstance" return="org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="startFileAfterGenerateKey"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="instance" type="org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector"
transient="false" volatile="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Used to inject certain faults for testing.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager -->
<class name="EncryptionZoneManager" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EncryptionZoneManager" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct a new EncryptionZoneManager.
@param dir Enclosing FSDirectory]]>
</doc>
</constructor>
<field name="LOG" type="org.slf4j.Logger"
transient="false" volatile="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Manages the list of encryption zones in the filesystem.
<p/>
The EncryptionZoneManager has its own lock, but relies on the FSDirectory
lock being held for many operations. The FSDirectory lock should not be
taken if the manager lock is already held.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSDirAttrOp -->
<class name="FSDirAttrOp" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FSDirAttrOp"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSDirAttrOp -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream -->
<class name="FSEditLogLoader.PositionTrackingInputStream" extends="java.io.FilterInputStream"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.StreamLimiter"/>
<constructor name="PositionTrackingInputStream" type="java.io.InputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="data" type="byte[]"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="data" type="byte[]"/>
<param name="offset" type="int"/>
<param name="length" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setLimit"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="limit" type="long"/>
</method>
<method name="clearLimit"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="mark"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="limit" type="int"/>
</method>
<method name="reset"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getPos" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="skip" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="amt" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[Stream wrapper that keeps track of the current stream position.
This stream also allows us to set a limit on how many bytes we can read
without getting an exception.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache -->
<class name="FSEditLogOp.OpInstanceCache" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="OpInstanceCache"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="get" return="org.apache.hadoop.hdfs.server.namenode.FSEditLogOp"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="opcode" type="org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Reader -->
<class name="FSEditLogOp.Reader" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Reader" type="java.io.DataInputStream, org.apache.hadoop.hdfs.server.namenode.StreamLimiter, int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct the reader
@param in The stream to read from.
@param logVersion The version of the data coming from the stream.]]>
</doc>
</constructor>
<method name="setMaxOpSize"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="maxOpSize" type="int"/>
</method>
<method name="readOp" return="org.apache.hadoop.hdfs.server.namenode.FSEditLogOp"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="skipBrokenEdits" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read an operation from the input stream.
Note that the objects returned from this method may be re-used by future
calls to the same method.
@param skipBrokenEdits If true, attempt to skip over damaged parts of
the input stream, rather than throwing an IOException
@return the operation read from the stream, or null at the end of the
file
@throws IOException on error. This function should only throw an
exception when skipBrokenEdits is false.]]>
</doc>
</method>
<method name="scanOp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Similar to decodeOp(), but instead of doing the real decoding, we skip
over the content of the op when the editlog format records op lengths.
@return the last txid of the segment, or INVALID_TXID on exception]]>
</doc>
</method>
<doc>
<![CDATA[Class for reading editlog ops from a stream]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Reader -->
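<!-- Usage sketch (illustrative): draining ops from an already-constructed
     FSEditLogOp.Reader. The helper class is hypothetical, and building the reader
     (stream, limiter, log version) is assumed to have happened elsewhere.

     import java.io.IOException;
     import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

     class EditLogReadLoopExample {
       static long countOps(FSEditLogOp.Reader reader) throws IOException {
         long ops = 0;
         FSEditLogOp op;
         // readOp returns null at end of file; returned objects may be reused by the
         // reader, so do not keep references to them across iterations.
         while ((op = reader.readOp(false)) != null) {
           ops++;
         }
         return ops;
       }
     }
-->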
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer -->
<class name="FSEditLogOp.Writer" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Writer" type="org.apache.hadoop.io.DataOutputBuffer"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="writeOp"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="op" type="org.apache.hadoop.hdfs.server.namenode.FSEditLogOp"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Write an operation to the output stream
@param op The operation to write
@throws IOException if an error occurs during writing.]]>
</doc>
</method>
<doc>
<![CDATA[Class for writing editlog ops]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader -->
<class name="FSImageFormat.Loader" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.AbstractLoader"/>
<method name="getLoadedImageMd5" return="org.apache.hadoop.io.MD5Hash"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the MD5 checksum of the image that has been loaded.
@throws IllegalStateException if load() has not yet been called.]]>
</doc>
</method>
<method name="getLoadedImageTxId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="load"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="curFile" type="java.io.File"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="updateBlocksMap"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
</method>
<method name="getFSDirectoryInLoading" return="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The FSDirectory of the namesystem where the fsimage is loaded]]>
</doc>
</method>
<method name="loadINodeWithLocalName" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="isSnapshotINode" type="boolean"/>
<param name="in" type="java.io.DataInput"/>
<param name="updateINodeMap" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="loadINodeWithLocalName" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="isSnapshotINode" type="boolean"/>
<param name="in" type="java.io.DataInput"/>
<param name="updateINodeMap" type="boolean"/>
<param name="counter" type="org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="loadINodeFileAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load {@link INodeFileAttributes}.]]>
</doc>
</method>
<method name="loadINodeDirectoryAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getSnapshot" return="org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[A one-shot class responsible for loading an image. The load() function
should be called once, after which the getter methods may be used to retrieve
information about the image that was loaded, if loading was successful.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader -->
<class name="FSImageFormatPBINode.Loader" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="loadPermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="long"/>
<param name="stringTable" type="java.lang.String[]"/>
</method>
<method name="loadAclEntries" return="com.google.common.collect.ImmutableList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto"/>
<param name="stringTable" type="java.lang.String[]"/>
</method>
<method name="loadXAttrs" return="com.google.common.collect.ImmutableList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto"/>
<param name="stringTable" type="java.lang.String[]"/>
</method>
<method name="loadQuotaByStorageTypeEntries" return="com.google.common.collect.ImmutableList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="proto" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto"/>
</method>
<method name="loadINodeDirectory" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="n" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode"/>
<param name="state" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext"/>
</method>
<method name="updateBlocksMap"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
<param name="bm" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockManager"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver -->
<class name="FSImageFormatPBINode.Saver" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="buildINodeFile" return="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes"/>
<param name="state" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext"/>
</method>
<method name="buildINodeDirectory" return="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"/>
<param name="state" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Loader -->
<class name="FSImageFormatProtobuf.Loader" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.AbstractLoader"/>
<method name="getLoadedImageMd5" return="org.apache.hadoop.io.MD5Hash"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLoadedImageTxId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLoaderContext" return="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Loader -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext -->
<class name="FSImageFormatProtobuf.LoaderContext" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="LoaderContext"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStringTable" return="java.lang.String[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRefList" return="java.util.ArrayList"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Saver -->
<class name="FSImageFormatProtobuf.Saver" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="getSavedDigest" return="org.apache.hadoop.io.MD5Hash"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getContext" return="org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSaverContext" return="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="commitSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="summary" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder"/>
<param name="name" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="CHECK_CANCEL_INTERVAL" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Saver -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext -->
<class name="FSImageFormatProtobuf.SaverContext" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="SaverContext"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStringMap" return="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext.DeduplicationMap"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRefList" return="java.util.ArrayList"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext.DeduplicationMap -->
<class name="FSImageFormatProtobuf.SaverContext.DeduplicationMap" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext.DeduplicationMap -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName -->
<class name="FSImageFormatProtobuf.SectionName" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="fromString" return="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Supported section names. The order of the enum constants determines the order of
loading.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName -->
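<!-- Illustrative sketch (not part of the generated JDiff output): the SectionName doc above
     notes that the declaration order of the constants is the order in which sections are
     loaded. The sketch walks the constants in that order and shows the valueOf/fromString
     lookups; the string passed to fromString is normally the section name recorded in the
     image FileSummary, so the round-trip through name() here is only for illustration.

     import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;

     class SectionOrderSketch {
       static void printLoadOrder() {
         for (SectionName sn : SectionName.values()) {   // declaration order == load order
           System.out.println(sn.ordinal() + ": " + sn.name());
         }
         // Look a constant up again by its enum name:
         SectionName first = SectionName.valueOf(SectionName.values()[0].name());
         // fromString() resolves the serialized section name stored in the file summary:
         SectionName fromFile = SectionName.fromString(first.name());
         System.out.println(first + " / " + fromFile);
       }
     }
-->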
<!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo -->
<class name="FSNamesystem.SafeModeInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[SafeModeInfo contains information related to the safe mode.
<p>
An instance of {@link SafeModeInfo} is created when the name node
enters safe mode.
<p>
During name node startup {@link SafeModeInfo} counts the number of
<em>safe blocks</em>, those that have at least the minimal number of
replicas, and calculates the ratio of safe blocks to the total number
of blocks in the system, i.e., the number of blocks tracked by
{@link FSNamesystem#blockManager}. When the ratio reaches the
{@link #threshold} it starts the SafeModeMonitor daemon in order
to monitor whether the safe mode {@link #extension} is passed.
Then it leaves safe mode and destroys itself.
<p>
If safe mode is turned on manually then the number of safe blocks is
not tracked because the name node is not intended to leave safe mode
automatically in that case.
@see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo -->
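<!-- Illustrative sketch (not part of the generated JDiff output): the SafeModeInfo doc above
     describes the ratio check that drives automatic safe-mode exit: safe blocks (blocks with
     at least the minimal replication) divided by total blocks must reach the configured
     threshold, after which the extension period is waited out before leaving safe mode. The
     sketch below restates that arithmetic with hypothetical numbers; the method and variable
     names are invented for illustration and are not the NameNode's internals.

     class SafeModeRatioSketch {
       static boolean canStartExtension(long safeBlocks, long totalBlocks, double threshold) {
         // e.g. dfs.namenode.safemode.threshold-pct defaults to 0.999
         if (totalBlocks == 0) {
           return true;                     // nothing to wait for
         }
         double ratio = (double) safeBlocks / (double) totalBlocks;
         return ratio >= threshold;         // then the SafeModeMonitor waits out the extension
       }

       public static void main(String[] args) {
         // 9990 of 10000 blocks have reached minimal replication:
         System.out.println(canStartExtension(9990, 10000, 0.999));  // true
         System.out.println(canStartExtension(9989, 10000, 0.999));  // false
       }
     }
-->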
<!-- start class org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet -->
<class name="GetDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="GetDelegationTokenServlet"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="doGet"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="req" type="javax.servlet.http.HttpServletRequest"/>
<param name="resp" type="javax.servlet.http.HttpServletResponse"/>
<exception name="ServletException" type="javax.servlet.ServletException"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="PATH_SPEC" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="RENEWER" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Serve delegation tokens over http for use in hftp.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet -->
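<!-- Illustrative sketch (not part of the generated JDiff output): GetDelegationTokenServlet
     serves delegation tokens over HTTP for hftp clients. A rough sketch of how a client-side
     request URL could be assembled from the servlet's public constants; the host/port and the
     renewer value are placeholders, and deserializing the returned credentials is left to the
     hftp client code.

     import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;

     class TokenUrlSketch {
       static String buildRequestUrl(String nnHttpAddress, String renewer) {
         // PATH_SPEC is the servlet mount point; RENEWER is the query parameter name.
         return "http://" + nnHttpAddress
             + GetDelegationTokenServlet.PATH_SPEC
             + "?" + GetDelegationTokenServlet.RENEWER + "=" + renewer;
       }

       public static void main(String[] args) {
         System.out.println(buildRequestUrl("namenode.example.com:50070", "mapred"));
       }
     }
-->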
<!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
<class name="HdfsAuditLogger" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
<constructor name="HdfsAuditLogger"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="logAuditEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="succeeded" type="boolean"/>
<param name="userName" type="java.lang.String"/>
<param name="addr" type="java.net.InetAddress"/>
<param name="cmd" type="java.lang.String"/>
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<param name="status" type="org.apache.hadoop.fs.FileStatus"/>
</method>
<method name="logAuditEvent"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="succeeded" type="boolean"/>
<param name="userName" type="java.lang.String"/>
<param name="addr" type="java.net.InetAddress"/>
<param name="cmd" type="java.lang.String"/>
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
<doc>
<![CDATA[Same as
{@link #logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)}
with additional parameters related to logging delegation token tracking
IDs.
@param succeeded Whether authorization succeeded.
@param userName Name of the user executing the request.
@param addr Remote address of the request.
@param cmd The requested command.
@param src Path of affected source file.
@param dst Path of affected destination file (if any).
@param stat File information for operations that change the file's metadata
(permissions, owner, times, etc).
@param ugi UserGroupInformation of the current user, or null if not logging
token tracking information
@param dtSecretManager The token secret manager, or null if not logging
token tracking information]]>
</doc>
</method>
<doc>
<![CDATA[Extension of {@link AuditLogger}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
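<!-- Illustrative sketch (not part of the generated JDiff output): a minimal custom audit
     logger built on the abstract HdfsAuditLogger above. A concrete subclass implements the
     extended logAuditEvent overload shown in this listing; the AuditLogger interface also
     declares an initialize(Configuration) hook (not reproduced in this section), which the
     sketch assumes and implements as a no-op.

     import java.net.InetAddress;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileStatus;
     import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
     import org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger;
     import org.apache.hadoop.security.UserGroupInformation;

     public class StdoutAuditLogger extends HdfsAuditLogger {
       @Override
       public void initialize(Configuration conf) {
         // no configuration needed for this sketch
       }

       @Override
       public void logAuditEvent(boolean succeeded, String userName, InetAddress addr,
           String cmd, String src, String dst, FileStatus stat,
           UserGroupInformation ugi, DelegationTokenSecretManager dtSecretManager) {
         // Emit one line per audited operation; a real logger would use a proper log appender.
         System.out.println("allowed=" + succeeded + " ugi=" + userName
             + " ip=" + addr + " cmd=" + cmd + " src=" + src + " dst=" + dst);
       }
     }
-->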
<!-- start class org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo -->
<class name="INode.BlocksMapUpdateInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlocksMapUpdateInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getToDeleteList" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The list of blocks that need to be removed from blocksMap]]>
</doc>
</method>
<method name="addDeleteBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="toDelete" type="org.apache.hadoop.hdfs.protocol.Block"/>
<doc>
<![CDATA[Add a to-be-deleted block into the
{@link BlocksMapUpdateInfo#toDeleteList}
@param toDelete the to-be-deleted block]]>
</doc>
</method>
<method name="removeDeleteBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Clear {@link BlocksMapUpdateInfo#toDeleteList}]]>
</doc>
</method>
<doc>
<![CDATA[Information used for updating the blocksMap when deleting files.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo -->
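<!-- Illustrative sketch (not part of the generated JDiff output): a small example of the
     BlocksMapUpdateInfo collection described above, which accumulates the blocks to remove
     from the blocksMap while a delete is being processed. The Block constructor arguments
     (block id, length, generation stamp) are made-up values for illustration.

     import java.util.List;
     import org.apache.hadoop.hdfs.protocol.Block;
     import org.apache.hadoop.hdfs.server.namenode.INode;

     class BlocksMapUpdateSketch {
       static void collectAndClear() {
         INode.BlocksMapUpdateInfo toRemove = new INode.BlocksMapUpdateInfo();
         Block blk = new Block(1073741825L, 134217728L, 1001L);  // hypothetical id/len/genstamp
         toRemove.addDeleteBlock(blk);

         List<?> pending = toRemove.getToDeleteList();  // blocks queued for removal from blocksMap
         System.out.println("blocks to delete: " + pending.size());

         toRemove.removeDeleteBlock(blk);  // undo a single entry if needed
         toRemove.clear();                 // or drop the whole list
       }
     }
-->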
<!-- start interface org.apache.hadoop.hdfs.server.namenode.INode.Feature -->
<interface name="INode.Feature" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[INode feature such as {@link FileUnderConstructionFeature}
and {@link DirectoryWithQuotaFeature}.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.INode.Feature -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
<class name="INodeAttributeProvider" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="INodeAttributeProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="start"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Initialize the provider. This method is called at NameNode startup
time.]]>
</doc>
</method>
<method name="stop"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
</doc>
</method>
<method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fullPath" type="java.lang.String"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
</method>
<method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pathElements" type="java.lang.String[]"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
</method>
<method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
<doc>
<![CDATA[Can be overridden by implementations to supply a custom AccessControlEnforcer
that provides an alternate implementation of the
default permission checking logic.
@param defaultEnforcer The Default AccessControlEnforcer
@return The AccessControlEnforcer to use]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
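<!-- Illustrative sketch (not part of the generated JDiff output): a minimal pluggable
     attribute provider built on the abstract INodeAttributeProvider above: implement
     start()/stop() for lifecycle and the getAttributes(String[], INodeAttributes) overload to
     (possibly) substitute attributes. This sketch simply passes the stored attributes through
     unchanged; any real policy logic is up to the implementation.

     import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
     import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

     public class PassThroughAttributeProvider extends INodeAttributeProvider {
       @Override
       public void start() {
         // called at NameNode startup; open connections to an external policy store here
       }

       @Override
       public void stop() {
         // called at NameNode shutdown; release any resources acquired in start()
       }

       @Override
       public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
         // Return the inode's own attributes; a real provider could return a wrapped copy
         // with, e.g., owner or permissions looked up externally.
         return inode;
       }
     }
-->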
<!-- start interface org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer -->
<interface name="INodeAttributeProvider.AccessControlEnforcer" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="checkPermission"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fsOwner" type="java.lang.String"/>
<param name="supergroup" type="java.lang.String"/>
<param name="callerUgi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="inodeAttrs" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes[]"/>
<param name="inodes" type="org.apache.hadoop.hdfs.server.namenode.INode[]"/>
<param name="pathByNameArr" type="byte[][]"/>
<param name="snapshotId" type="int"/>
<param name="path" type="java.lang.String"/>
<param name="ancestorIndex" type="int"/>
<param name="doCheckOwner" type="boolean"/>
<param name="ancestorAccess" type="org.apache.hadoop.fs.permission.FsAction"/>
<param name="parentAccess" type="org.apache.hadoop.fs.permission.FsAction"/>
<param name="access" type="org.apache.hadoop.fs.permission.FsAction"/>
<param name="subAccess" type="org.apache.hadoop.fs.permission.FsAction"/>
<param name="ignoreEmptyDir" type="boolean"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<doc>
<![CDATA[Checks permission on a file system object. Has to throw an Exception
if the filesystem object is not accessible by the calling UGI.
@param fsOwner Filesystem owner (The Namenode user)
@param supergroup super user group
@param callerUgi UserGroupInformation of the caller
@param inodeAttrs Array of INode attributes for each path element in
the path
@param inodes Array of INodes for each path element in the path
@param pathByNameArr Array of byte arrays of the LocalName
@param snapshotId the snapshotId of the requested path
@param path Path String
@param ancestorIndex Index of ancestor
@param doCheckOwner perform ownership check
@param ancestorAccess The access required by the ancestor of the path.
@param parentAccess The access required by the parent of the path.
@param access The access required by the path.
@param subAccess If path is a directory, the access required of
the path and all its sub-directories. If path is not a
directory, it has no effect.
@param ignoreEmptyDir Whether to ignore permission checking for empty directories
@throws AccessControlException]]>
</doc>
</method>
<doc>
<![CDATA[The AccessControlEnforcer allows implementations to override the
default file system permission checking logic enforced on a file system
object.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer -->
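<!-- Illustrative sketch (not part of the generated JDiff output): a custom
     AccessControlEnforcer that wraps the default enforcer handed to
     INodeAttributeProvider.getExternalAccessControlEnforcer(). It logs the request and then
     delegates the actual check, so the default HDFS permission logic still decides the outcome.

     import org.apache.hadoop.fs.permission.FsAction;
     import org.apache.hadoop.hdfs.server.namenode.INode;
     import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
     import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
     import org.apache.hadoop.security.AccessControlException;
     import org.apache.hadoop.security.UserGroupInformation;

     public class LoggingEnforcer implements AccessControlEnforcer {
       private final AccessControlEnforcer defaultEnforcer;

       public LoggingEnforcer(AccessControlEnforcer defaultEnforcer) {
         this.defaultEnforcer = defaultEnforcer;
       }

       @Override
       public void checkPermission(String fsOwner, String supergroup,
           UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs, INode[] inodes,
           byte[][] pathByNameArr, int snapshotId, String path, int ancestorIndex,
           boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
           FsAction access, FsAction subAccess, boolean ignoreEmptyDir)
           throws AccessControlException {
         System.out.println("checkPermission: user=" + callerUgi.getShortUserName()
             + " path=" + path + " access=" + access);
         // Delegate to the default permission checking logic.
         defaultEnforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs, inodes,
             pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner, ancestorAccess,
             parentAccess, access, subAccess, ignoreEmptyDir);
       }
     }
-->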
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributes.SnapshotCopy -->
<class name="INodeAttributes.SnapshotCopy" extends="java.lang.Object"
abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
<method name="getLocalNameBytes" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getUserName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFsPermission" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFsPermissionShort" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPermissionLong" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAclFeature" return="org.apache.hadoop.hdfs.server.namenode.AclFeature"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getModificationTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAccessTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getXAttrFeature" return="org.apache.hadoop.hdfs.server.namenode.XAttrFeature"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A read-only copy of the inode attributes.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributes.SnapshotCopy -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeDirectory -->
<class name="INodeDirectory" extends="org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"/>
<constructor name="INodeDirectory" type="long, byte[], org.apache.hadoop.fs.permission.PermissionStatus, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[constructor]]>
</doc>
</constructor>
<constructor name="INodeDirectory" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory, boolean, org.apache.hadoop.hdfs.server.namenode.INode.Feature[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Copy constructor
@param other The INodeDirectory to be copied
@param adopt Indicates whether or not to set the parent field of the child
INodes to the new node
@param featuresToCopy any number of features to copy to the new node.
The method will do a reference copy, not a deep copy.]]>
</doc>
</constructor>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="path" type="java.lang.Object"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<exception name="PathIsNotDirectoryException" type="org.apache.hadoop.fs.PathIsNotDirectoryException"/>
<doc>
<![CDATA[Cast INode to INodeDirectory.]]>
</doc>
</method>
<method name="isDirectory" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true unconditionally.]]>
</doc>
</method>
<method name="asDirectory" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return this object.]]>
</doc>
</method>
<method name="getLocalStoragePolicyID" return="byte"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStoragePolicyID" return="byte"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getQuotaCounts" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addSpaceConsumed"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="counts" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
<param name="verify" type="boolean"/>
<exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
</method>
<method name="getDirectoryWithQuotaFeature" return="org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[If the directory contains a {@link DirectoryWithQuotaFeature}, return it;
otherwise, return null.]]>
</doc>
</method>
<method name="addSnapshotFeature" return="org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="diffs" type="org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList"/>
</method>
<method name="getDirectoryWithSnapshotFeature" return="org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[If feature list contains a {@link DirectoryWithSnapshotFeature}, return it;
otherwise, return null.]]>
</doc>
</method>
<method name="isWithSnapshot" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Does this directory have the snapshot feature?]]>
</doc>
</method>
<method name="getDiffs" return="org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshotINode" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="toDetailString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDirectorySnapshottableFeature" return="org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isSnapshottable" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshot" return="org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotName" type="byte[]"/>
</method>
<method name="setSnapshotQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotQuota" type="int"/>
</method>
<method name="addSnapshot" return="org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="int"/>
<param name="name" type="java.lang.String"/>
<exception name="SnapshotException" type="org.apache.hadoop.hdfs.protocol.SnapshotException"/>
<exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
</method>
<method name="removeSnapshot" return="org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="snapshotName" type="java.lang.String"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
<exception name="SnapshotException" type="org.apache.hadoop.hdfs.protocol.SnapshotException"/>
</method>
<method name="renameSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<param name="oldName" type="java.lang.String"/>
<param name="newName" type="java.lang.String"/>
<exception name="SnapshotException" type="org.apache.hadoop.hdfs.protocol.SnapshotException"/>
</method>
<method name="addSnapshottableFeature"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[add DirectorySnapshottableFeature]]>
</doc>
</method>
<method name="removeSnapshottableFeature"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[remove DirectorySnapshottableFeature]]>
</doc>
</method>
<method name="replaceChild"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="oldChild" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="newChild" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="inodeMap" type="org.apache.hadoop.hdfs.server.namenode.INodeMap"/>
<doc>
<![CDATA[Replace the given child with a new child. Note that we no longer need to
replace a normal INodeDirectory or INodeFile with an
INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only case
for child replacement is for reference nodes.
</doc>
</method>
<method name="recordModification"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latestSnapshotId" type="int"/>
</method>
<method name="saveChild2Snapshot" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="child" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="latestSnapshotId" type="int"/>
<param name="snapshotCopy" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Save the child to the latest snapshot.
@return the child inode, which may be replaced.]]>
</doc>
</method>
<method name="getChild" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="byte[]"/>
<param name="snapshotId" type="int"/>
<doc>
<![CDATA[@param name the name of the child
@param snapshotId
if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
from the corresponding snapshot; otherwise, get the result from
the current directory.
@return the child inode.]]>
</doc>
</method>
<method name="searchChild" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Search for the given INode in the children list and the deleted lists of
snapshots.
@return {@link Snapshot#CURRENT_STATE_ID} if the inode is in the children
list; {@link Snapshot#NO_SNAPSHOT_ID} if the inode is neither in the
children list nor in any snapshot; otherwise the snapshot id of the
corresponding snapshot diff list.]]>
</doc>
</method>
<method name="getChildrenList" return="org.apache.hadoop.hdfs.util.ReadOnlyList"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
<doc>
<![CDATA[@param snapshotId
if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
from the corresponding snapshot; otherwise, get the result from
the current directory.
@return the current children list if the specified snapshotId is
{@link Snapshot#CURRENT_STATE_ID}; otherwise, return the children list
corresponding to the snapshot.
Note that the returned list is never null.]]>
</doc>
</method>
<method name="removeChild" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="child" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="latestSnapshotId" type="int"/>
<doc>
<![CDATA[Remove the specified child from this directory.]]>
</doc>
</method>
<method name="removeChild" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="child" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Remove the specified child from this directory.
The basic remove method which actually calls children.remove(..).
@param child the child inode to be removed
@return true if the child is removed; false if the child is not found.]]>
</doc>
</method>
<method name="addChild" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="node" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="setModTime" type="boolean"/>
<param name="latestSnapshotId" type="int"/>
<exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
<doc>
<![CDATA[Add a child inode to the directory.
@param node INode to insert
@param setModTime set modification time for the parent node
not needed when replaying the addition and
the parent already has the proper mod time
@return false if a child with this name already exists;
otherwise, return true.]]>
</doc>
</method>
<method name="addChild" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="node" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
</method>
<method name="computeQuotaUsage" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="blockStoragePolicyId" type="byte"/>
<param name="counts" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
<param name="useCache" type="boolean"/>
<param name="lastSnapshotId" type="int"/>
</method>
<method name="computeQuotaUsage4CurrentDirectory" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="storagePolicyId" type="byte"/>
<param name="counts" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
<doc>
<![CDATA[Add quota usage for this inode excluding children.]]>
</doc>
</method>
<method name="computeContentSummary" return="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="summary" type="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"/>
</method>
<method name="computeDirectoryContentSummary" return="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="summary" type="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"/>
<param name="snapshotId" type="int"/>
</method>
<method name="undoRename4ScrParent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="oldChild" type="org.apache.hadoop.hdfs.server.namenode.INodeReference"/>
<param name="newChild" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
<doc>
<![CDATA[This method is usually called by the undo section of rename.
Before calling this function, the rename operation replaces the
original src node with a reference node (a WithName
instance) in both the children list and a created list, deletes the
reference node from the children list, and adds it to the corresponding
deleted list.
To undo the above operations, we take the following steps:
<pre>
1) remove the WithName node from the deleted list (if it exists)
2) replace the WithName node in the created list with srcChild
3) add srcChild back as a child of srcParent. Since the node was already
added to the created list of a snapshot diff in step 2, we do not need
to add srcChild to the created list of the latest snapshot.
</pre>
We do not need to update quota usage because the old child was already in
the deleted list.
@param oldChild
The reference node to be removed/replaced
@param newChild
The node to be added back
@throws QuotaExceededException should not throw this exception]]>
</doc>
</method>
<method name="undoRename4DstParent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="deletedChild" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="latestSnapshotId" type="int"/>
<exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
<doc>
<![CDATA[Undo the rename operation for the dst tree, i.e., if the rename operation
(with OVERWRITE option) removes a file/dir from the dst tree, add it back
and delete any corresponding record in the deleted list.]]>
</doc>
</method>
<method name="clearChildren"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Set the children list to null.]]>
</doc>
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="cleanSubtreeRecursively" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="snapshot" type="int"/>
<param name="prior" type="int"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
<param name="excludedNodes" type="java.util.Map"/>
<doc>
<![CDATA[Call cleanSubtree(..) recursively down the subtree.]]>
</doc>
</method>
<method name="destroyAndCollectBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<method name="cleanSubtree" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="snapshotId" type="int"/>
<param name="priorSnapshotId" type="int"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<method name="metadataEquals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="other" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"/>
<doc>
<![CDATA[Compare the metadata with another INodeDirectory]]>
</doc>
</method>
<method name="dumpTreeRecursively"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.PrintWriter"/>
<param name="prefix" type="java.lang.StringBuilder"/>
<param name="snapshot" type="int"/>
</method>
<method name="dumpTreeRecursively"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.PrintWriter"/>
<param name="prefix" type="java.lang.StringBuilder"/>
<param name="subs" type="java.lang.Iterable"/>
<doc>
<![CDATA[Dump the given subtrees.
@param prefix The prefix string to print at the beginning of each line.
@param subs The subtrees.]]>
</doc>
</method>
<method name="getChildrenNum" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<field name="DEFAULT_FILES_PER_DIRECTORY" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Directory INode class.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeDirectory -->
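<!-- Illustrative sketch (not part of the generated JDiff output): a short example of reading
     children through the INodeDirectory API above. The directory instance is assumed to come
     from the NameNode's FSDirectory; Snapshot.CURRENT_STATE_ID (referenced in the Javadoc
     above) selects the current, non-snapshot view, and "data" is a placeholder child name.

     import java.nio.charset.StandardCharsets;
     import org.apache.hadoop.hdfs.server.namenode.INode;
     import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
     import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
     import org.apache.hadoop.hdfs.util.ReadOnlyList;

     class DirectoryReadSketch {
       static void listCurrentChildren(INodeDirectory dir) {
         // Children of the current (non-snapshot) state; the returned list is never null.
         ReadOnlyList<INode> children = dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
         System.out.println("child count: " + dir.getChildrenNum(Snapshot.CURRENT_STATE_ID));
         for (INode child : children) {
           System.out.println(child.getLocalName());
         }
         // Look up a single child by its local name bytes ("data" is a placeholder).
         INode child = dir.getChild("data".getBytes(StandardCharsets.UTF_8),
             Snapshot.CURRENT_STATE_ID);
         System.out.println("found: " + (child != null));
       }
     }
-->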
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode -->
<class name="INodeDirectory.SnapshotAndINode" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshotAndINode" type="int, org.apache.hadoop.hdfs.server.namenode.INode"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<field name="snapshotId" type="int"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A pair of Snapshot and INode objects.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes.CopyWithQuota -->
<class name="INodeDirectoryAttributes.CopyWithQuota" extends="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes.SnapshotCopy"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CopyWithQuota" type="byte[], org.apache.hadoop.fs.permission.PermissionStatus, org.apache.hadoop.hdfs.server.namenode.AclFeature, long, long, long, org.apache.hadoop.hdfs.util.EnumCounters, org.apache.hadoop.hdfs.server.namenode.XAttrFeature"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="CopyWithQuota" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getQuotaCounts" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes.CopyWithQuota -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes.SnapshotCopy -->
<class name="INodeDirectoryAttributes.SnapshotCopy" extends="org.apache.hadoop.hdfs.server.namenode.INodeAttributes.SnapshotCopy"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"/>
<constructor name="SnapshotCopy" type="byte[], org.apache.hadoop.fs.permission.PermissionStatus, org.apache.hadoop.hdfs.server.namenode.AclFeature, long, org.apache.hadoop.hdfs.server.namenode.XAttrFeature"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="SnapshotCopy" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getQuotaCounts" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isDirectory" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="metadataEquals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="other" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes"/>
</method>
<doc>
<![CDATA[A copy of the inode directory attributes]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes.SnapshotCopy -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes.SnapshotCopy -->
<class name="INodeFileAttributes.SnapshotCopy" extends="org.apache.hadoop.hdfs.server.namenode.INodeAttributes.SnapshotCopy"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes"/>
<constructor name="SnapshotCopy" type="byte[], org.apache.hadoop.fs.permission.PermissionStatus, org.apache.hadoop.hdfs.server.namenode.AclFeature, long, long, short, long, byte, org.apache.hadoop.hdfs.server.namenode.XAttrFeature"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="SnapshotCopy" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isDirectory" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFileReplication" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPreferredBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLocalStoragePolicyID" return="byte"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHeaderLong" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="metadataEquals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="other" type="org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes"/>
</method>
<doc>
<![CDATA[A copy of the inode file attributes]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes.SnapshotCopy -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeMap -->
<class name="INodeMap" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getMapIterator" return="java.util.Iterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="put"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Add an {@link INode} into the {@link INode} map. Replace the old value if
necessary.
@param inode The {@link INode} to be added to the map.]]>
</doc>
</method>
<method name="remove"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Remove an {@link INode} from the map.
@param inode The {@link INode} to be removed.]]>
</doc>
</method>
<method name="size" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The size of the map.]]>
</doc>
</method>
<method name="get" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="long"/>
<doc>
<![CDATA[Get the {@link INode} with the given id from the map.
@param id ID of the {@link INode}.
@return The {@link INode} in the map with the given id, or null if no
such {@link INode} is in the map.]]>
</doc>
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Clear the {@link #map}]]>
</doc>
</method>
<doc>
<![CDATA[Storing all the {@link INode}s and maintaining the mapping between INode ID
and INode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeMap -->
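<!-- Illustrative sketch (not part of the generated JDiff output): a minimal example of the
     INodeMap operations documented above: put/get by inode id, size, remove and clear.
     INodeMap instances are created and owned by the NameNode's FSDirectory, so both the map
     and the inode here are assumed to be handed in by that surrounding code.

     import org.apache.hadoop.hdfs.server.namenode.INode;
     import org.apache.hadoop.hdfs.server.namenode.INodeMap;

     class INodeMapSketch {
       static void roundTrip(INodeMap map, INode inode) {
         map.put(inode);                        // replaces any previous entry with the same id
         INode found = map.get(inode.getId());  // null if no inode with that id is present
         System.out.println("size=" + map.size() + " found=" + (found != null));
         map.remove(inode);
         map.clear();                           // drops every remaining entry
       }
     }
-->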
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeReference -->
<class name="INodeReference" extends="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="INodeReference" type="org.apache.hadoop.hdfs.server.namenode.INode, org.apache.hadoop.hdfs.server.namenode.INode"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="tryRemoveReference" return="int"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Try to remove the given reference and then return the reference count.
If the given inode is not a reference, return -1.]]>
</doc>
</method>
<method name="getReferredINode" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="setReferredINode"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="referred" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
</method>
<method name="isReference" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="asReference" return="org.apache.hadoop.hdfs.server.namenode.INodeReference"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="isFile" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="asFile" return="org.apache.hadoop.hdfs.server.namenode.INodeFile"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="isDirectory" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="asDirectory" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="isSymlink" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="asSymlink" return="org.apache.hadoop.hdfs.server.namenode.INodeSymlink"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLocalNameBytes" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setLocalName"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="byte[]"/>
</method>
<method name="getId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPermissionStatus" return="org.apache.hadoop.fs.permission.PermissionStatus"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="getUserName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="getFsPermission" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="getFsPermissionShort" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPermissionLong" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getModificationTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="updateModificationTime" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="mtime" type="long"/>
<param name="latestSnapshotId" type="int"/>
</method>
<method name="setModificationTime"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="modificationTime" type="long"/>
</method>
<method name="getAccessTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="setAccessTime"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="accessTime" type="long"/>
</method>
<method name="getStoragePolicyID" return="byte"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLocalStoragePolicyID" return="byte"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="cleanSubtree" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="snapshot" type="int"/>
<param name="prior" type="int"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<method name="destroyAndCollectBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<method name="computeContentSummary" return="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="summary" type="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"/>
</method>
<method name="computeQuotaUsage" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="blockStoragePolicyId" type="byte"/>
<param name="counts" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
<param name="useCache" type="boolean"/>
<param name="lastSnapshotId" type="int"/>
</method>
<method name="getSnapshotINode" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="getQuotaCounts" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="dumpTreeRecursively"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.PrintWriter"/>
<param name="prefix" type="java.lang.StringBuilder"/>
<param name="snapshot" type="int"/>
</method>
<method name="getDstSnapshotId" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[An anonymous reference to an inode.
This class and its subclasses are used to support multiple access paths.
A file/directory may have multiple access paths when it is stored in some
snapshots and it is renamed/moved to other locations.
For example,
(1) Suppose we have /abc/foo, say the inode of foo is inode(id=1000,name=foo)
(2) create snapshot s0 for /abc
(3) mv /abc/foo /xyz/bar, i.e. inode(id=1000,name=...) is renamed from "foo"
to "bar" and its parent becomes /xyz.
Then, /xyz/bar and /abc/.snapshot/s0/foo are two different access paths to
the same inode, inode(id=1000,name=bar).
With references, we have the following
- /abc has a child ref(id=1001,name=foo).
- /xyz has a child ref(id=1002)
- Both ref(id=1001,name=foo) and ref(id=1002) point to another reference,
ref(id=1003,count=2).
- Finally, ref(id=1003,count=2) points to inode(id=1000,name=bar).
 Note 1: A reference without a name, e.g. ref(id=1002), uses the name
 of the referred inode.
Note 2: getParent() always returns the parent in the current state, e.g.
inode(id=1000,name=bar).getParent() returns /xyz but not /abc.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeReference -->
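<!-- Illustrative sketch: a conceptual Java model of the rename example in the
     INodeReference documentation above (inode id=1000 renamed from /abc/foo to /xyz/bar
     while snapshot s0 exists). The classes below are invented stand-ins, not the real
     WithName/WithCount/DstReference implementations.

  final class RefModel {
    static class Inode { final long id; String name; Inode(long id, String n) { this.id = id; this.name = n; } }

    // Shared, counted reference to the inode, analogous to ref(id=1003, count=2).
    static class CountedRef { final Inode referred; int count; CountedRef(Inode r) { referred = r; } }

    // A reference that may carry its own snapshot-time name; a reference without a
    // fixed name falls back to the referred inode's current name.
    static class Ref {
      final CountedRef target; final String fixedName;
      Ref(CountedRef t, String name) { target = t; fixedName = name; target.count++; }
      String localName() { return fixedName != null ? fixedName : target.referred.name; }
    }

    public static void main(String[] args) {
      Inode inode = new Inode(1000, "bar");        // renamed to "bar" under /xyz
      CountedRef shared = new CountedRef(inode);
      Ref inSnapshot = new Ref(shared, "foo");     // /abc/.snapshot/s0/foo keeps the old name
      Ref inXyz = new Ref(shared, null);           // the /xyz child uses the inode's current name
      System.out.println(inSnapshot.localName() + " / " + inXyz.localName() + " count=" + shared.count);
    }
  }
-->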
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference -->
<class name="INodeReference.DstReference" extends="org.apache.hadoop.hdfs.server.namenode.INodeReference"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DstReference" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount, int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getDstSnapshotId" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="cleanSubtree" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="snapshot" type="int"/>
<param name="prior" type="int"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<method name="destroyAndCollectBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
<doc>
<![CDATA[{@inheritDoc}
<br/>
To destroy a DstReference node, we first remove its link with the
referred node. If the reference number of the referred node is <= 0, we
destroy the subtree of the referred node. Otherwise, we clean the
referred node's subtree and delete everything created after the last
rename operation, i.e., everything outside of the scope of the prior
WithName nodes.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference -->
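<!-- Illustrative sketch: the destroy decision described for DstReference.destroyAndCollectBlocks,
     reduced to its branch structure. The Subtree interface and method names are placeholders;
     the real implementation operates on INode trees with block and quota bookkeeping.

  final class DestroyDecisionDemo {
    interface Subtree {
      void destroy();                  // remove the whole subtree of the referred node
      void cleanAfterLastRename();     // keep only what older WithName references still need
    }

    // remainingRefCount is the reference count after the DstReference's own link
    // to the referred node has been removed.
    static void destroyDstReference(Subtree referred, int remainingRefCount) {
      if (remainingRefCount <= 0) {
        referred.destroy();
      } else {
        referred.cleanAfterLastRename();
      }
    }
  }
-->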
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount -->
<class name="INodeReference.WithCount" extends="org.apache.hadoop.hdfs.server.namenode.INodeReference"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="WithCount" type="org.apache.hadoop.hdfs.server.namenode.INodeReference, org.apache.hadoop.hdfs.server.namenode.INode"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getReferenceCount" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addReference"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ref" type="org.apache.hadoop.hdfs.server.namenode.INodeReference"/>
<doc>
<![CDATA[Increment and then return the reference count.]]>
</doc>
</method>
<method name="removeReference"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ref" type="org.apache.hadoop.hdfs.server.namenode.INodeReference"/>
<doc>
<![CDATA[Decrement and then return the reference count.]]>
</doc>
</method>
<method name="getParentRef" return="org.apache.hadoop.hdfs.server.namenode.INodeReference"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
<doc>
<![CDATA[@return the WithName/DstReference node contained in the given snapshot.]]>
</doc>
</method>
<field name="WITHNAME_COMPARATOR" type="java.util.Comparator"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Compare snapshots by ID, where null indicates the current state and is thus
        greater than any non-null snapshot.]]>
</doc>
</field>
<doc>
<![CDATA[An anonymous reference with reference count.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName -->
<class name="INodeReference.WithName" extends="org.apache.hadoop.hdfs.server.namenode.INodeReference"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="WithName" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount, byte[], int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getLocalNameBytes" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="setLocalName"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="name" type="byte[]"/>
</method>
<method name="getLastSnapshotId" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="computeContentSummary" return="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="summary" type="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"/>
</method>
<method name="computeQuotaUsage" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="blockStoragePolicyId" type="byte"/>
<param name="counts" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
<param name="useCache" type="boolean"/>
<param name="lastSnapshotId" type="int"/>
</method>
<method name="cleanSubtree" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="snapshot" type="int"/>
<param name="prior" type="int"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<method name="destroyAndCollectBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bsps" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
</method>
<doc>
<![CDATA[A reference with a fixed name.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.INodesInPath -->
<class name="INodesInPath" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="replace" return="org.apache.hadoop.hdfs.server.namenode.INodesInPath"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<param name="pos" type="int"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
        <![CDATA[Replace an inode of the given INodesInPath at the given position. We do a
        deep copy of the INode array.
@param pos the position of the replacement
@param inode the new inode
@return a new INodesInPath instance]]>
</doc>
</method>
<method name="append" return="org.apache.hadoop.hdfs.server.namenode.INodesInPath"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<param name="child" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="childName" type="byte[]"/>
<doc>
<![CDATA[Extend a given INodesInPath with a child INode. The child INode will be
appended to the end of the new INodesInPath.]]>
</doc>
</method>
<method name="getLatestSnapshotId" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[For non-snapshot paths, return the latest snapshot id found in the path.]]>
</doc>
</method>
<method name="getPathSnapshotId" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[For snapshot paths, return the id of the snapshot specified in the path.
For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}.]]>
</doc>
</method>
<method name="getINode" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="i" type="int"/>
<doc>
        <![CDATA[@return the i-th inode if i >= 0;
        otherwise (i < 0), return the (length + i)-th inode.]]>
</doc>
</method>
<method name="getLastINode" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the last inode.]]>
</doc>
</method>
<method name="getPathComponents" return="byte[][]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the full path in string form]]>
</doc>
</method>
<method name="getParentPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pos" type="int"/>
</method>
<method name="getPath" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="offset" type="int"/>
<param name="length" type="int"/>
<doc>
<![CDATA[@param offset start endpoint (inclusive)
@param length number of path components
@return sub-list of the path]]>
</doc>
</method>
<method name="length" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getReadOnlyINodes" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getINodesArray" return="org.apache.hadoop.hdfs.server.namenode.INode[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getParentINodesInPath" return="org.apache.hadoop.hdfs.server.namenode.INodesInPath"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return an INodesInPath instance containing all the INodes in the parent
path. We do a deep copy here.]]>
</doc>
</method>
<method name="getExistingINodes" return="org.apache.hadoop.hdfs.server.namenode.INodesInPath"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[@return a new INodesInPath instance that only contains existing INodes.
Note that this method only handles non-snapshot paths.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Contains INodes information resolved from a given path.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.INodesInPath -->
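<!-- Illustrative sketch: the index convention documented for INodesInPath.getINode(i),
     where a non-negative index counts from the start of the resolved path and a negative
     index counts back from the end. The array of strings is a stand-in for resolved INodes.

  final class PathIndexDemo {
    static <T> T getAt(T[] inodes, int i) {
      return i >= 0 ? inodes[i] : inodes[inodes.length + i];
    }

    public static void main(String[] args) {
      String[] path = { "/", "user", "alice", "file.txt" };
      System.out.println(getAt(path, 0));     // "/" : the root inode
      System.out.println(getAt(path, -1));    // "file.txt" : the last inode, as in getLastINode()
    }
  }
-->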
<!-- start class org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException -->
<class name="JournalManager.CorruptionException" extends="java.io.IOException"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CorruptionException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
        <![CDATA[Indicate that a journal cannot be used to load a certain range of
        edits.
This exception occurs in the case of a gap in the transactions, or a
corrupt edit file.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.JournalSet -->
<class name="JournalSet" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.JournalManager"/>
<method name="format"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="nsInfo" type="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="hasSomeData" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="startLogSegment" return="org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="txId" type="long"/>
<param name="layoutVersion" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="finalizeLogSegment"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="firstTxId" type="long"/>
<param name="lastTxId" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="isOpen" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="selectInputStreams"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="streams" type="java.util.Collection"/>
<param name="fromTxId" type="long"/>
<param name="inProgressOk" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
        <![CDATA[In this function, we gather streams from all of our JournalManager
        objects and add them to the collection one by one.
        @param streams The collection to add the streams to. It may or
        may not be sorted; this is up to the caller.
@param fromTxId The transaction ID to start looking for streams at
@param inProgressOk Should we consider unfinalized streams?]]>
</doc>
</method>
<method name="chainAndMakeRedundantStreams"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="outStreams" type="java.util.Collection"/>
<param name="allStreams" type="java.util.PriorityQueue"/>
<param name="fromTxId" type="long"/>
</method>
<method name="isEmpty" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns true if there are no journals, all redundant journals are disabled,
or any required journals are disabled.
        @return True if there are no journals, all redundant journals are disabled,
or any required journals are disabled.]]>
</doc>
</method>
<method name="setOutputBufferCapacity"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="size" type="int"/>
</method>
<method name="purgeLogsOlderThan"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="minTxIdToKeep" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="recoverUnfinalizedSegments"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getEditLogManifest" return="org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fromTxId" type="long"/>
<doc>
<![CDATA[Return a manifest of what finalized edit logs are available. All available
edit logs are returned starting from the transaction id passed. If
'fromTxId' falls in the middle of a log, that log is returned as well.
@param fromTxId Starting transaction id to read the logs.
@return RemoteEditLogManifest object.]]>
</doc>
</method>
<method name="discardSegments"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="startTxId" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doPreUpgrade"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doUpgrade"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storage" type="org.apache.hadoop.hdfs.server.common.Storage"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doFinalize"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="canRollBack" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
<param name="prevStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
<param name="targetLayoutVersion" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="doRollback"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getJournalCTime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="EDIT_LOG_INPUT_STREAM_COMPARATOR" type="java.util.Comparator"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
      <![CDATA[Manages a collection of Journals. None of the methods are synchronized; it is
      assumed that the FSEditLog methods that use this class provide proper
      synchronization.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.JournalSet -->
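<!-- Illustrative sketch: the two JournalSet behaviors documented above, reduced to plain
     collections. LogSegment and StreamSource are invented stand-ins for the real edit-log
     stream and JournalManager types; only the selection rules are modeled.

  import java.util.ArrayList;
  import java.util.Collection;
  import java.util.List;

  final class JournalSelectionDemo {
    static final class LogSegment {
      final long firstTxId, lastTxId; final boolean finalized;
      LogSegment(long first, long last, boolean finalized) {
        this.firstTxId = first; this.lastTxId = last; this.finalized = finalized;
      }
    }

    interface StreamSource {                       // stands in for one JournalManager
      List<LogSegment> segmentsFrom(long fromTxId, boolean inProgressOk);
    }

    // selectInputStreams: ask every source and add whatever it returns to the
    // caller's collection; sorting, if any, is left to the caller.
    static void selectInputStreams(Collection<LogSegment> out, List<StreamSource> sources,
                                   long fromTxId, boolean inProgressOk) {
      for (StreamSource s : sources) {
        out.addAll(s.segmentsFrom(fromTxId, inProgressOk));
      }
    }

    // getEditLogManifest: keep finalized segments whose range ends at or after fromTxId,
    // so a segment that contains fromTxId in the middle of its range is kept as well.
    static List<LogSegment> manifestFrom(List<LogSegment> all, long fromTxId) {
      List<LogSegment> kept = new ArrayList<>();
      for (LogSegment seg : all) {
        if (seg.finalized && seg.lastTxId >= fromTxId) {
          kept.add(seg);
        }
      }
      return kept;
    }
  }
-->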
<!-- start class org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext.RequestStopException -->
<class name="MetaRecoveryContext.RequestStopException" extends="java.io.IOException"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="RequestStopException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Exception thrown when the user has requested processing to stop.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext.RequestStopException -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode.NameNodeHAContext -->
<class name="NameNode.NameNodeHAContext" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.ha.HAContext"/>
<constructor name="NameNodeHAContext"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<method name="setState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.server.namenode.ha.HAState"/>
</method>
<method name="getState" return="org.apache.hadoop.hdfs.server.namenode.ha.HAState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="startActiveServices"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="stopActiveServices"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="startStandbyServices"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="prepareToStopStandbyServices"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="ServiceFailedException" type="org.apache.hadoop.ha.ServiceFailedException"/>
</method>
<method name="stopStandbyServices"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="writeLock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="writeUnlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="checkOperation"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="op" type="org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory"/>
<exception name="StandbyException" type="org.apache.hadoop.ipc.StandbyException"/>
<doc>
        <![CDATA[Check if an operation of the given category is allowed]]>
</doc>
</method>
<method name="allowStaleReads" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Class used to expose {@link NameNode} as context to {@link HAState}]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode.NameNodeHAContext -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory -->
<class name="NameNode.OperationCategory" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Categories of operations supported by the namenode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature -->
<class name="NameNodeLayoutVersion.Feature" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature"/>
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Enums for features that change the layout version.
<br><br>
To add a new layout version:
<ul>
<li>Define a new enum constant with a short enum name, the new layout version
and description of the added feature.</li>
      <li>When adding a layout version with an ancestor that is not the same as
its immediate predecessor, use the constructor where a specific ancestor
can be passed.
</li>
</ul>]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature -->
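<!-- Illustrative sketch: the enum pattern described above, where each constant carries a
     layout version, an ancestor version, and a description. The constants and numeric
     values below are invented for illustration and are not real NameNode layout features.

  final class LayoutFeatureDemo {
    enum Feature {
      EXAMPLE_FEATURE(-60, -59, "an invented feature"),            // ancestor is the predecessor
      EXAMPLE_WITH_ANCESTOR(-61, -59, "skips over version -60");   // explicit, non-default ancestor

      final int layoutVersion;
      final int ancestorVersion;
      final String description;

      Feature(int layoutVersion, int ancestorVersion, String description) {
        this.layoutVersion = layoutVersion;
        this.ancestorVersion = ancestorVersion;
        this.description = description;
      }
    }
  }
-->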
<!-- start interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
<interface name="NameNodeMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getVersion" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the version of Hadoop.
@return the version]]>
</doc>
</method>
<method name="getSoftwareVersion" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the version of software running on the Namenode
@return a string representing the version]]>
</doc>
</method>
<method name="getUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the used space by data nodes.
@return the used space by data nodes]]>
</doc>
</method>
<method name="getFree" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets total non-used raw bytes.
@return total non-used raw bytes]]>
</doc>
</method>
<method name="getTotal" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets total raw bytes including non-dfs used space.
@return the total raw bytes including non-dfs used space]]>
</doc>
</method>
<method name="getSafemode" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the safemode status
@return the safemode status]]>
</doc>
</method>
<method name="isUpgradeFinalized" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Checks if upgrade is finalized.
@return true, if upgrade is finalized]]>
</doc>
</method>
<method name="getRollingUpgradeStatus" return="org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo.Bean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the RollingUpgrade information.
        @return Rolling upgrade information if an upgrade is in progress; otherwise
        (e.g. if there is no upgrade or the upgrade is finalized), returns null.]]>
</doc>
</method>
<method name="getNonDfsUsedSpace" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Gets the total space used by data nodes for non-DFS purposes, such as storing
        temporary files on the local file system.
        @return the non-DFS used space of the cluster]]>
</doc>
</method>
<method name="getPercentUsed" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the total used space by data nodes as percentage of total capacity
@return the percentage of used space on the cluster.]]>
</doc>
</method>
<method name="getPercentRemaining" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the total remaining space by data nodes as percentage of total
capacity
@return the percentage of the remaining space on the cluster]]>
</doc>
</method>
<method name="getCacheUsed" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns the amount of cache used by the datanode (in bytes).]]>
</doc>
</method>
<method name="getCacheCapacity" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns the total cache capacity of the datanode (in bytes).]]>
</doc>
</method>
<method name="getBlockPoolUsedSpace" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the total space used by the block pools of this namenode]]>
</doc>
</method>
<method name="getPercentBlockPoolUsed" return="float"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the total space used by the block pool as percentage of total capacity]]>
</doc>
</method>
<method name="getTotalBlocks" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Gets the total number of blocks on the cluster.
@return the total number of blocks of the cluster]]>
</doc>
</method>
<method name="getTotalFiles" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the total number of files on the cluster
@return the total number of files on the cluster]]>
</doc>
</method>
<method name="getNumberOfMissingBlocks" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the total number of missing blocks on the cluster
@return the total number of missing blocks on the cluster]]>
</doc>
</method>
<method name="getNumberOfMissingBlocksWithReplicationFactorOne" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the total number of missing blocks on the cluster with
replication factor 1
@return the total number of missing blocks on the cluster with
replication factor 1]]>
</doc>
</method>
<method name="getThreads" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the number of threads.
@return the number of threads]]>
</doc>
</method>
<method name="getLiveNodes" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the live node information of the cluster.
@return the live node information]]>
</doc>
</method>
<method name="getDeadNodes" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the dead node information of the cluster.
@return the dead node information]]>
</doc>
</method>
<method name="getDecomNodes" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the decommissioning node information of the cluster.
@return the decommissioning node information]]>
</doc>
</method>
<method name="getClusterId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the cluster id.
@return the cluster id]]>
</doc>
</method>
<method name="getBlockPoolId" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the block pool id.
@return the block pool id]]>
</doc>
</method>
<method name="getNameDirStatuses" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Get status information about the directories storing the image and edit logs
of the NN.
@return the name dir status information, as a JSON string.]]>
</doc>
</method>
<method name="getNodeUsage" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Get Max, Median, Min and Standard Deviation of DataNode usage.
@return the DataNode usage information, as a JSON string.]]>
</doc>
</method>
<method name="getNameJournalStatus" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get status information about the journals of the NN.
@return the name journal status information, as a JSON string.]]>
</doc>
</method>
<method name="getJournalTransactionInfo" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get information about the transaction ID, including the last applied
transaction ID and the most recent checkpoint's transaction ID]]>
</doc>
</method>
<method name="getNNStarted" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the NN start time
@return the NN start time]]>
</doc>
</method>
<method name="getCompileInfo" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the compilation information which contains date, user and branch
@return the compilation information, as a JSON string.]]>
</doc>
</method>
<method name="getCorruptFiles" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the list of corrupt files
@return the list of corrupt files, as a JSON string.]]>
</doc>
</method>
<method name="getDistinctVersionCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the number of distinct versions of live datanodes
@return the number of distinct versions of live datanodes]]>
</doc>
</method>
<method name="getDistinctVersions" return="java.util.Map"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Get the number of live datanodes for each distinct version
        @return the number of live datanodes for each distinct version]]>
</doc>
</method>
<doc>
<![CDATA[This is the JMX management interface for namenode information]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
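<!-- Illustrative sketch: reading a few of the NameNodeMXBean attributes over JMX with the
     standard javax.management client API. The JMX service URL, port, and the MBean object
     name "Hadoop:service=NameNode,name=NameNodeInfo" are assumptions for illustration;
     verify them against your own cluster's JMX configuration.

  import javax.management.MBeanServerConnection;
  import javax.management.ObjectName;
  import javax.management.remote.JMXConnector;
  import javax.management.remote.JMXConnectorFactory;
  import javax.management.remote.JMXServiceURL;

  public class NameNodeJmxProbe {
    public static void main(String[] args) throws Exception {
      // Placeholder endpoint; substitute the NameNode host and JMX port in use.
      String url = "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi";
      JMXConnector connector = JMXConnectorFactory.connect(new JMXServiceURL(url));
      try {
        MBeanServerConnection conn = connector.getMBeanServerConnection();
        ObjectName nnInfo = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // Attribute names follow the usual JavaBean convention for the getters above.
        System.out.println("Version:     " + conn.getAttribute(nnInfo, "Version"));
        System.out.println("Safemode:    " + conn.getAttribute(nnInfo, "Safemode"));
        System.out.println("TotalBlocks: " + conn.getAttribute(nnInfo, "TotalBlocks"));
      } finally {
        connector.close();
      }
    }
  }
-->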
<!-- start interface org.apache.hadoop.hdfs.server.namenode.NameNodeStatusMXBean -->
<interface name="NameNodeStatusMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getNNRole" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the NameNode role.
@return the NameNode role.]]>
</doc>
</method>
<method name="getState" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the NameNode state.
@return the NameNode state.]]>
</doc>
</method>
<method name="getHostAndPort" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the host and port colon separated.
@return host and port colon separated.]]>
</doc>
</method>
<method name="isSecurityEnabled" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Gets whether security is enabled.
@return true, if security is enabled.]]>
</doc>
</method>
<method name="getLastHATransitionTime" return="long"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the most recent HA transition time in milliseconds from the epoch.
@return the most recent HA transition time in milliseconds from the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[This is the JMX management interface for NameNode status information]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.NameNodeStatusMXBean -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType -->
<class name="NNStorage.NameNodeDirType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isOfType" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
</method>
<doc>
      <![CDATA[Implementation of StorageDirType specific to namenode storage.
      A storage directory can be of type IMAGE, which stores only the fsimage;
      of type EDITS, which stores only edits; or of type IMAGE_AND_EDITS, which
      stores both fsimage and edits.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile -->
<class name="NNStorage.NameNodeFile" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager -->
<class name="NNStorageRetentionManager" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NNStorageRetentionManager" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NNStorage, org.apache.hadoop.hdfs.server.namenode.LogsPurgeable, org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="NNStorageRetentionManager" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NNStorage, org.apache.hadoop.hdfs.server.namenode.LogsPurgeable"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[The NNStorageRetentionManager is responsible for inspecting the storage
directories of the NN and enforcing a retention policy on checkpoints
and edit logs.
It delegates the actual removal of files to a StoragePurger
implementation, which might delete the files or instead copy them to
a filer or HDFS for later analysis.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager -->
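<!-- Illustrative sketch: a retention pass in the spirit of the NNStorageRetentionManager
     description, keeping the newest N checkpoints and delegating removal to a purger.
     The interface and parameterization are invented; the real manager reads its limits
     from the configuration and inspects actual storage directories.

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;

  final class RetentionDemo {
    interface StoragePurger {                   // the delegate that actually removes files
      void purgeImage(long checkpointTxId);
      void purgeLogsOlderThan(long minTxIdToKeep);
    }

    static void enforce(List<Long> checkpointTxIds, int numCheckpointsToRetain, StoragePurger purger) {
      if (checkpointTxIds.isEmpty()) {
        return;
      }
      List<Long> sorted = new ArrayList<>(checkpointTxIds);
      Collections.sort(sorted);
      int purgeCount = Math.max(0, sorted.size() - numCheckpointsToRetain);
      for (int i = 0; i < purgeCount; i++) {
        purger.purgeImage(sorted.get(i));       // drop the oldest checkpoints first
      }
      // Edit logs older than the oldest retained checkpoint are no longer needed.
      long minRetained = sorted.get(Math.min(purgeCount, sorted.size() - 1));
      purger.purgeLogsOlderThan(minRetained);
    }
  }
-->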
<!-- start class org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil -->
<class name="NNUpgradeUtil" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NNUpgradeUtil"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="renameCurToTmp"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Rename the existing current dir to previous.tmp, and create a new empty
current dir.]]>
</doc>
</method>
<method name="doUpgrade"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
<param name="storage" type="org.apache.hadoop.hdfs.server.common.Storage"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Perform the upgrade of the storage dir to the given storage info. The new
storage info is written into the current directory, and the previous.tmp
directory is renamed to previous.
@param sd the storage directory to upgrade
@param storage info about the new upgraded versions.
@throws IOException in the event of error]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil -->
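<!-- Illustrative sketch: the two documented directory moves, expressed with plain
     java.nio.file operations on placeholder paths. It does not write real storage
     metadata; the directory names mirror the ones described above.

  import java.io.IOException;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.nio.file.Paths;

  final class UpgradeDirsDemo {
    // renameCurToTmp: rename "current" to "previous.tmp", then recreate an empty "current".
    static void renameCurToTmp(Path storageDir) throws IOException {
      Path current = storageDir.resolve("current");
      Files.move(current, storageDir.resolve("previous.tmp"));
      Files.createDirectory(current);
    }

    // doUpgrade (final step): once the new storage info has been written into "current",
    // rename "previous.tmp" to "previous" to complete the upgrade of this directory.
    static void finishUpgrade(Path storageDir) throws IOException {
      Files.move(storageDir.resolve("previous.tmp"), storageDir.resolve("previous"));
    }

    public static void main(String[] args) throws IOException {
      Path sd = Paths.get("/tmp/example-storage-dir");  // hypothetical storage directory layout
      renameCurToTmp(sd);
      // ... write the upgraded metadata into "current" here ...
      finishUpgrade(sd);
    }
  }
-->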
<!-- start class org.apache.hadoop.hdfs.server.namenode.Quota -->
<class name="Quota" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.namenode.Quota[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.namenode.Quota"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Quota types.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.Quota -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.Quota.Counts -->
<class name="Quota.Counts" extends="org.apache.hadoop.hdfs.util.EnumCounters"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="newInstance" return="org.apache.hadoop.hdfs.server.namenode.Quota.Counts"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="namespace" type="long"/>
<param name="storagespace" type="long"/>
<doc>
<![CDATA[@return a new counter with the given namespace and storagespace usages.]]>
</doc>
</method>
<method name="newInstance" return="org.apache.hadoop.hdfs.server.namenode.Quota.Counts"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Counters for quota counts.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.Quota.Counts -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry -->
<class name="QuotaByStorageTypeEntry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getStorageType" return="org.apache.hadoop.fs.StorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getQuota" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry.Builder -->
<class name="QuotaByStorageTypeEntry.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setStorageType" return="org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
</method>
<method name="setQuota" return="org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="quota" type="long"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry.Builder -->
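<!-- Illustrative builder sketch (hedged): the storage type and quota value below are
     arbitrary placeholders chosen only to show the Builder methods listed above.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry;

class QuotaByStorageTypeEntrySketch {
  static QuotaByStorageTypeEntry ssdQuota() {
    return new QuotaByStorageTypeEntry.Builder()
        .setStorageType(StorageType.SSD)      // storage tier the quota applies to (illustrative)
        .setQuota(10L * 1024 * 1024 * 1024)   // quota in bytes (illustrative 10 GB)
        .build();
  }
}
-->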
<!-- start class org.apache.hadoop.hdfs.server.namenode.QuotaCounts -->
<class name="QuotaCounts" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="add"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
</method>
<method name="subtract"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
</method>
<method name="negation" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns a QuotaCounts whose value is {@code (-this)}.
@return {@code -this}]]>
</doc>
</method>
<method name="getNameSpace" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setNameSpace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="nameSpaceCount" type="long"/>
</method>
<method name="addNameSpace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="nsDelta" type="long"/>
</method>
<method name="getStorageSpace" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setStorageSpace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="spaceCount" type="long"/>
</method>
<method name="addStorageSpace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dsDelta" type="long"/>
</method>
<method name="getTypeSpaces" return="org.apache.hadoop.hdfs.util.EnumCounters"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addTypeSpace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.fs.StorageType"/>
<param name="delta" type="long"/>
</method>
<method name="anyNsSsCountGreaterOrEqual" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="anyTypeSpaceCountGreaterOrEqual" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Counters for namespace, storage space and storage type space quota and usage.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.QuotaCounts -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder -->
<class name="QuotaCounts.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="nameSpace" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="storageSpace" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="typeSpaces" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="org.apache.hadoop.hdfs.util.EnumCounters"/>
</method>
<method name="typeSpaces" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="quotaCount" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.server.namenode.QuotaCounts"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.QuotaCounts.Builder -->
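<!-- Illustrative sketch (hedged): combines QuotaCounts through the Builder and the
     add()/negation() methods listed above; all numeric values are placeholders.

import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;

class QuotaCountsSketch {
  static QuotaCounts remainingUsage() {
    QuotaCounts used = new QuotaCounts.Builder()
        .nameSpace(100)           // number of names (files + directories)
        .storageSpace(1L << 30)   // bytes of storage consumed
        .build();
    QuotaCounts released = new QuotaCounts.Builder()
        .nameSpace(10)
        .storageSpace(1L << 20)
        .build();
    // add() mutates "used" in place; negation() returns a new counts object equal to (-released),
    // so this is equivalent to used.subtract(released).
    used.add(released.negation());
    return used;
  }
}
-->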
<!-- start class org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet -->
<class name="RenewDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="RenewDelegationTokenServlet"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="doGet"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="req" type="javax.servlet.http.HttpServletRequest"/>
<param name="resp" type="javax.servlet.http.HttpServletResponse"/>
<exception name="ServletException" type="javax.servlet.ServletException"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="PATH_SPEC" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="TOKEN" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Renew delegation tokens over http for use in hftp.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.StoragePolicySummary -->
<class name="StoragePolicySummary" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Aggregate the storage type information for a set of blocks]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.StoragePolicySummary -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException -->
<class name="TransferFsImage.HttpGetFailedException" extends="java.io.IOException"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpPutFailedException -->
<class name="TransferFsImage.HttpPutFailedException" extends="java.io.IOException"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpPutFailedException -->
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.ha">
<!-- start class org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider -->
<class name="AbstractNNFailoverProxyProvider" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.io.retry.FailoverProxyProvider"/>
<constructor name="AbstractNNFailoverProxyProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="useLogicalURI" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Inquire whether logical HA URI is used for the implementation. If it is
used, a special token handling may be needed to make sure a token acquired
from a node in the HA pair can be used against the other node.
@return true if logical HA URI is used. false, if not used.]]>
</doc>
</method>
<method name="setFallbackToSimpleAuth"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<doc>
<![CDATA[Set for tracking if a secure client falls back to simple auth. This method
is synchronized only to stifle a Findbugs warning.
@param fallbackToSimpleAuth - set to true or false during this method to
indicate if a secure client falls back to simple auth]]>
</doc>
</method>
<field name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider -->
<class name="ConfiguredFailoverProxyProvider" extends="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ConfiguredFailoverProxyProvider" type="org.apache.hadoop.conf.Configuration, java.net.URI, java.lang.Class"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInterface" return="java.lang.Class"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getProxy" return="org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Lazily initialize the RPC proxy object.]]>
</doc>
</method>
<method name="performFailover"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="currentProxy" type="T"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Close all the proxy objects which have been opened over the lifetime of
this proxy provider.]]>
</doc>
</method>
<method name="useLogicalURI" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Logical URI is required for this failover proxy provider.]]>
</doc>
</method>
<doc>
<![CDATA[A FailoverProxyProvider implementation which allows one to configure two URIs
to connect to during fail-over. The first configured address is tried first,
and on a fail-over event the other address is tried.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider -->
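<!-- Illustrative client configuration sketch (hedged): the nameservice id "mycluster",
     the namenode ids nn1/nn2 and the host:port values are placeholders, not values taken
     from this document; it shows how a client typically selects this provider for a
     logical HA URI.

import org.apache.hadoop.conf.Configuration;

class HaClientConfSketch {
  static Configuration haConf() {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://mycluster");
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
    // The logical URI hdfs://mycluster is resolved through this provider, which tries the
    // first configured address and fails over to the second on a failover event.
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    return conf;
  }
}
-->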
<!-- start class org.apache.hadoop.hdfs.server.namenode.ha.IPFailoverProxyProvider -->
<class name="IPFailoverProxyProvider" extends="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="IPFailoverProxyProvider" type="org.apache.hadoop.conf.Configuration, java.net.URI, java.lang.Class"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInterface" return="java.lang.Class"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getProxy" return="org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="performFailover"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="currentProxy" type="T"/>
<doc>
<![CDATA[Nothing to do for IP failover]]>
</doc>
</method>
<method name="close"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Close the proxy.]]>
</doc>
</method>
<method name="useLogicalURI" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Logical URI is not used for IP failover.]]>
</doc>
</method>
<doc>
<![CDATA[A NNFailoverProxyProvider implementation which works on IP failover setup.
Only one proxy is used to connect to both servers and switching between
the servers is done by the environment/infrastructure, which guarantees
clients can consistently reach only one node at a time.
Clients with a live connection will likely get connection reset after an
IP failover. This case will be handled by the
FailoverOnNetworkExceptionRetry retry policy. I.e. if the call is
not idempotent, it won't get retried.
A connection reset while setting up a connection (i.e. before sending a
request) will be handled in ipc client.
The namenode URI must contain a resolvable host name.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.ha.IPFailoverProxyProvider -->
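<!-- Illustrative client configuration sketch (hedged): with IP failover the client points
     at a single resolvable (e.g. virtual) address and switching is done by the
     infrastructure. The host name below is a placeholder, and keying the proxy-provider
     property by the URI authority is an assumption of this sketch.

import org.apache.hadoop.conf.Configuration;

class IpFailoverConfSketch {
  static Configuration ipFailoverConf() {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://nn-vip.example.com:8020");
    conf.set("dfs.client.failover.proxy.provider.nn-vip.example.com",
        "org.apache.hadoop.hdfs.server.namenode.ha.IPFailoverProxyProvider");
    return conf;
  }
}
-->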
<!-- start class org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider -->
<class name="WrappedFailoverProxyProvider" extends="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="WrappedFailoverProxyProvider" type="org.apache.hadoop.io.retry.FailoverProxyProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Wrap the given instance of an old FailoverProxyProvider.]]>
</doc>
</constructor>
<method name="getInterface" return="java.lang.Class"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getProxy" return="org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="performFailover"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="currentProxy" type="T"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Close the proxy.]]>
</doc>
</method>
<method name="useLogicalURI" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Assume logical URI is used for old proxy provider implementations.]]>
</doc>
</method>
<doc>
<![CDATA[A NNFailoverProxyProvider implementation which wraps old implementations
directly implementing the {@link FailoverProxyProvider} interface.
It is assumed that the old implementation is using a logical URI.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider -->
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
<!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
<class name="NameNodeMetrics" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="totalFileOps" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="create" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="r" type="org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole"/>
</method>
<method name="getJvmMetrics" return="org.apache.hadoop.metrics2.source.JvmMetrics"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="shutdown"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrGetBlockLocations"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrFilesCreated"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrCreateFileOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrFilesAppended"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrAddBlockOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrGetAdditionalDatanodeOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrFilesRenamed"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrFilesTruncated"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrFilesDeleted"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="delta" type="long"/>
</method>
<method name="incrDeleteFileOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrGetListingOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrFilesInGetListingOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="delta" type="int"/>
</method>
<method name="incrFileInfoOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrCreateSymlinkOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrGetLinkTargetOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrAllowSnapshotOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrDisAllowSnapshotOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrCreateSnapshotOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrDeleteSnapshotOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrRenameSnapshotOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrListSnapshottableDirOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrSnapshotDiffReportOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrBlockReceivedAndDeletedOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrStorageBlockReportOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addTransaction"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latency" type="long"/>
</method>
<method name="incrTransactionsBatchedInSync"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addSync"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="elapsed" type="long"/>
</method>
<method name="setFsImageLoadTime"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="elapsed" type="long"/>
</method>
<method name="addBlockReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latency" type="long"/>
</method>
<method name="addCacheBlockReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latency" type="long"/>
</method>
<method name="setSafeModeTime"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="elapsed" type="long"/>
</method>
<method name="addGetEdit"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latency" type="long"/>
</method>
<method name="addGetImage"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latency" type="long"/>
</method>
<method name="addPutImage"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latency" type="long"/>
</method>
<doc>
<![CDATA[This class is for maintaining the various NameNode activity statistics
and publishing them through the metrics interfaces.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
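<!-- Illustrative sketch (hedged): creates the metrics source and records a few of the
     counters listed above. NamenodeRole.NAMENODE is assumed to be the role enum value
     for an active NameNode, and the transaction latency value is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;

class NameNodeMetricsSketch {
  static void recordCreate(Configuration conf) {
    NameNodeMetrics metrics = NameNodeMetrics.create(conf, NamenodeRole.NAMENODE);
    metrics.incrCreateFileOps();   // one create() RPC handled
    metrics.incrFilesCreated();    // one file actually created
    metrics.addTransaction(3);     // edit-log transaction latency (illustrative value)
    metrics.shutdown();
  }
}
-->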
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff -->
<class name="DirectoryWithSnapshotFeature.DirectoryDiff" extends="org.apache.hadoop.hdfs.server.namenode.snapshot.AbstractINodeDiff"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getChildrenDiff" return="org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The difference of an {@link INodeDirectory} between two snapshots.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList -->
<class name="DirectoryWithSnapshotFeature.DirectoryDiffList" extends="org.apache.hadoop.hdfs.server.namenode.snapshot.AbstractINodeDiffList"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DirectoryDiffList"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="replaceChild" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.util.Diff.ListType"/>
<param name="oldChild" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<param name="newChild" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Replace the given child in the created/deleted list, if there is any.]]>
</doc>
</method>
<method name="removeChild" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.util.Diff.ListType"/>
<param name="child" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Remove the given child in the created/deleted list, if there is any.]]>
</doc>
</method>
<method name="findSnapshotDeleted" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="child" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
<doc>
<![CDATA[Find the corresponding snapshot whose deleted list contains the given
inode.
@return the id of the snapshot. {@link Snapshot#NO_SNAPSHOT_ID} if the
given inode is not in any of the snapshots.]]>
</doc>
</method>
<doc>
<![CDATA[A list of directory diffs.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff -->
<class name="FileDiff" extends="org.apache.hadoop.hdfs.server.namenode.snapshot.AbstractINodeDiff"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getFileSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the file size in the snapshot.]]>
</doc>
</method>
<method name="setBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blocks" type="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous[]"/>
<doc>
<![CDATA[Copy block references into the snapshot
up to the current {@link #fileSize}.
Should be done only once.]]>
</doc>
</method>
<method name="getBlocks" return="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="destroyAndCollectSnapshotBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
</method>
<doc>
<![CDATA[The difference of an {@link INodeFile} between two snapshots.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList -->
<class name="FileDiffList" extends="org.apache.hadoop.hdfs.server.namenode.snapshot.AbstractINodeDiffList"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FileDiffList"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="destroyAndCollectSnapshotBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
</method>
<method name="saveSelf2Snapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="latestSnapshotId" type="int"/>
<param name="iNodeFile" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
<param name="snapshotCopy" type="org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes"/>
<param name="withBlocks" type="boolean"/>
</method>
<method name="findEarlierSnapshotBlocks" return="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="findLaterSnapshotBlocks" return="org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<doc>
<![CDATA[A list of FileDiffs for storing snapshot data.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot.Loader -->
<class name="FSImageFormatPBSnapshot.Loader" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="Loader" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem, org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Loader"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="loadINodeReferenceSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.InputStream"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[The sequence of the ref nodes in refList must be strictly the same as
the sequence in the fsimage.]]>
</doc>
</method>
<method name="loadSnapshotSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.InputStream"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load the snapshots section from fsimage. Also add snapshottable feature
to snapshottable directories.]]>
</doc>
</method>
<method name="loadSnapshotDiffSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.InputStream"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load the snapshot diff section from fsimage.]]>
</doc>
</method>
<doc>
<![CDATA[Loading snapshot-related information from the protobuf-based FSImage.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot.Loader -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot.Saver -->
<class name="FSImageFormatPBSnapshot.Saver" extends="java.lang.Object"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="Saver" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Saver, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder, org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext, org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="serializeSnapshotSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.OutputStream"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save all the snapshottable directories and snapshots to the fsimage.]]>
</doc>
</method>
<method name="serializeINodeReferenceSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.OutputStream"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[This can only be called after serializing both INode_Dir and SnapshotDiff]]>
</doc>
</method>
<method name="serializeSnapshotDiffSection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.OutputStream"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save all the snapshot diffs to the fsimage.]]>
</doc>
</method>
<doc>
<![CDATA[Saving snapshot-related information to the protobuf-based FSImage.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot.Saver -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root -->
<class name="Snapshot.Root" extends="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getChildrenList" return="org.apache.hadoop.hdfs.util.ReadOnlyList"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshotId" type="int"/>
</method>
<method name="getChild" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="byte[]"/>
<param name="snapshotId" type="int"/>
</method>
<method name="computeContentSummary" return="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="summary" type="org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext"/>
</method>
<method name="getFullPathName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The root directory of the snapshot.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat -->
<class name="SnapshotFSImageFormat" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshotFSImageFormat"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="saveSnapshots"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="current" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save snapshots and snapshot quota for a snapshottable directory.
@param current The directory that the snapshots belong to.
@param out The {@link DataOutput} to write.
@throws IOException]]>
</doc>
</method>
<method name="saveDirectoryDiffList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
<param name="out" type="java.io.DataOutput"/>
<param name="referenceMap" type="org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="saveFileDiffList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="loadFileDiffList" return="org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<param name="loader" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="loadCreated" return="org.apache.hadoop.hdfs.server.namenode.INode"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="createdNodeName" type="byte[]"/>
<param name="parent" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load a node stored in the created list from fsimage.
@param createdNodeName The name of the created node.
@param parent The directory that the created list belongs to.
@return The created node.]]>
</doc>
</method>
<method name="loadSnapshotList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="snapshottableParent" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
<param name="numSnapshots" type="int"/>
<param name="in" type="java.io.DataInput"/>
<param name="loader" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load snapshots and snapshotQuota for a Snapshottable directory.
@param snapshottableParent
The snapshottable directory for loading.
@param numSnapshots
The number of snapshots that the directory has.
@param loader
The loader]]>
</doc>
</method>
<method name="loadDirectoryDiffList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
<param name="in" type="java.io.DataInput"/>
<param name="loader" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
directory.
@param dir
The snapshottable directory for loading.
@param in
The {@link DataInput} instance to read.
@param loader
The loader]]>
</doc>
</method>
<doc>
<![CDATA[A helper class defining static methods for reading/writing snapshot-related
information from/to the FSImage.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap -->
<class name="SnapshotFSImageFormat.ReferenceMap" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReferenceMap"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="writeINodeReferenceWithCount"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="withCount" type="org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount"/>
<param name="out" type="java.io.DataOutput"/>
<param name="writeUnderConstruction" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="toProcessSubtree" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="long"/>
</method>
<method name="loadINodeReferenceWithCount" return="org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="isSnapshotINode" type="boolean"/>
<param name="in" type="java.io.DataInput"/>
<param name="loader" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[A reference map for fsimage serialization.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager -->
<class name="SnapshotManager" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotStatsMXBean"/>
<constructor name="SnapshotManager" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setSnapshottable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<param name="checkNestedSnapshottable" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the given directory as a snapshottable directory.
If the path is already a snapshottable directory, update the quota.]]>
</doc>
</method>
<method name="addSnapshottable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
<doc>
<![CDATA[Add the given snapshottable directory to {@link #snapshottables}.]]>
</doc>
</method>
<method name="removeSnapshottable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="toRemove" type="java.util.List"/>
<doc>
<![CDATA[Remove snapshottable directories from {@link #snapshottables}]]>
</doc>
</method>
<method name="resetSnapshottable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the given snapshottable directory to non-snapshottable.
@throws SnapshotException if there are snapshots in the directory.]]>
</doc>
</method>
<method name="getSnapshottableRoot" return="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Find the source root directory where the snapshot will be taken
for a given path.
@return Snapshottable directory.
@throws IOException
Throw IOException when the given path does not lead to an
existing snapshottable directory.]]>
</doc>
</method>
<method name="createSnapshot" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<param name="snapshotRoot" type="java.lang.String"/>
<param name="snapshotName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a snapshot of the given path.
It is assumed that the caller will perform synchronization.
@param iip the INodes resolved from the snapshottable directory's path
@param snapshotName
The name of the snapshot.
@throws IOException
Throw IOException when 1) the given path does not lead to an
existing snapshottable directory, and/or 2) there exists a
snapshot with the given name for the directory, and/or 3)
snapshot number exceeds quota]]>
</doc>
</method>
<method name="deleteSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<param name="snapshotName" type="java.lang.String"/>
<param name="collectedBlocks" type="org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo"/>
<param name="removedINodes" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Delete a snapshot for a snapshottable directory
@param snapshotName Name of the snapshot to be deleted
@param collectedBlocks Used to collect information to update blocksMap
@throws IOException]]>
</doc>
</method>
<method name="renameSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<param name="snapshotRoot" type="java.lang.String"/>
<param name="oldSnapshotName" type="java.lang.String"/>
<param name="newSnapshotName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Rename the given snapshot
@param oldSnapshotName
Old name of the snapshot
@param newSnapshotName
New name of the snapshot
@throws IOException
Throw IOException when 1) the given path does not lead to an
existing snapshottable directory, and/or 2) the snapshot with the
old name does not exist for the directory, and/or 3) there exists
a snapshot with the new name for the directory]]>
</doc>
</method>
<method name="getNumSnapshottableDirs" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getNumSnapshots" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Write {@link #snapshotCounter}, {@link #numSnapshots},
and all snapshots to the DataOutput.]]>
</doc>
</method>
<method name="read" return="java.util.Map"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<param name="loader" type="org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and
all snapshots from the DataInput]]>
</doc>
</method>
<method name="getSnapshottableDirListing" return="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="userName" type="java.lang.String"/>
<doc>
<![CDATA[List all the snapshottable directories that are owned by the current user.
@param userName Current user name.
@return Snapshottable directories that are owned by the current user,
represented as an array of {@link SnapshottableDirectoryStatus}. If
{@code userName} is null, return all the snapshottable dirs.]]>
</doc>
</method>
<method name="diff" return="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="iip" type="org.apache.hadoop.hdfs.server.namenode.INodesInPath"/>
<param name="snapshotRootPath" type="java.lang.String"/>
<param name="from" type="java.lang.String"/>
<param name="to" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Compute the difference between two snapshots of a directory, or between a
snapshot of the directory and its current tree.]]>
</doc>
</method>
<method name="clearSnapshottableDirs"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getMaxSnapshotID" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns the maximum allowable snapshot ID based on the bit width of the
snapshot ID.
@return maximum allowable snapshot ID.]]>
</doc>
</method>
<method name="registerMXBean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="shutdown"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshottableDirectories" return="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus.Bean[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSnapshots" return="org.apache.hadoop.hdfs.protocol.SnapshotInfo.Bean[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toBean" return="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus.Bean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="d" type="org.apache.hadoop.hdfs.server.namenode.INodeDirectory"/>
</method>
<method name="toBean" return="org.apache.hadoop.hdfs.protocol.SnapshotInfo.Bean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="s" type="org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot"/>
</method>
<doc>
<![CDATA[Manage snapshottable directories and their snapshots.
This class includes operations that create, access, modify snapshots and/or
snapshot-related data. In general, the locking structure of snapshot
operations is: <br>
1. Lock the {@link FSNamesystem} lock in {@link FSNamesystem} before calling
into {@link SnapshotManager} methods.<br>
2. Lock the {@link FSDirectory} lock for the {@link SnapshotManager} methods
if necessary.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager -->
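  <!-- The class Javadoc above prescribes a lock ordering for snapshot operations:
       take the FSNamesystem lock first, then the FSDirectory lock if needed. A
       minimal Java sketch of that ordering, using plain ReentrantReadWriteLocks
       as stand-ins for the internal NameNode locks (the real locks are private
       to FSNamesystem and FSDirectory):

       import java.util.concurrent.locks.ReentrantReadWriteLock;

       public class SnapshotLockOrderSketch {
         private final ReentrantReadWriteLock fsNamesystemLock = new ReentrantReadWriteLock();
         private final ReentrantReadWriteLock fsDirectoryLock = new ReentrantReadWriteLock();

         public void readSnapshotData() {
           // 1. Acquire the FSNamesystem lock before calling into SnapshotManager.
           fsNamesystemLock.readLock().lock();
           try {
             // 2. Acquire the FSDirectory lock only if the SnapshotManager call needs it.
             fsDirectoryLock.readLock().lock();
             try {
               // ... call SnapshotManager methods here, e.g. getSnapshottableDirListing ...
             } finally {
               fsDirectoryLock.readLock().unlock();
             }
           } finally {
             fsNamesystemLock.readLock().unlock();
           }
         }
       }
  -->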
<!-- start interface org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotStatsMXBean -->
<interface name="SnapshotStatsMXBean" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getSnapshottableDirectories" return="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus.Bean[]"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the list of snapshottable directories
@return the list of snapshottable directories]]>
</doc>
</method>
<method name="getSnapshots" return="org.apache.hadoop.hdfs.protocol.SnapshotInfo.Bean[]"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the list of snapshots
@return the list of snapshots]]>
</doc>
</method>
<doc>
<![CDATA[An interface used to retrieve statistical information related to
snapshots.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotStatsMXBean -->
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.top">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
<!-- start class org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op -->
<class name="RollingWindowManager.Op" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Op" type="java.lang.String, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="addUser"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="u" type="org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User"/>
</method>
<method name="getOpType" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTopUsers" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTotalCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Represents an operation within a TopWindow. It contains a ranked
set of the top users for the operation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow -->
<class name="RollingWindowManager.TopWindow" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="TopWindow" type="int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="addOp"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="op" type="org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op"/>
</method>
<method name="getWindowLenMs" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getOps" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Represents a snapshot of the rolling window. It contains one Op per
operation in the window, with ranked users for each Op.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow -->
<!-- start class org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User -->
<class name="RollingWindowManager.User" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="User" type="java.lang.String, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getUser" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Represents a user who called an Op within a TopWindow. Specifies the
user and the number of times the user called the operation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User -->
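  <!-- A minimal sketch that wires the three value classes above together through
       the listed constructors and accessors. The window length, op name, user
       names and counts are made-up example values; the constructor argument
       meanings are assumed from the corresponding getters.

       import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
       import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
       import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;

       public class TopWindowSketch {
         public static void main(String[] args) {
           TopWindow window = new TopWindow(60000);       // window length in ms
           Op listStatus = new Op("listStatus", 42L);     // op type, total count
           listStatus.addUser(new User("alice", 30L));    // per-user call counts
           listStatus.addUser(new User("bob", 12L));
           window.addOp(listStatus);

           for (Op op : window.getOps()) {
             System.out.println(op.getOpType() + " total=" + op.getTotalCount());
             for (User u : op.getTopUsers()) {
               System.out.println("  " + u.getUser() + "=" + u.getCount());
             }
           }
         }
       }
  -->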
</package>
<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
<!-- start class org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods -->
<class name="NamenodeWebHdfsMethods" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NamenodeWebHdfsMethods"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getRemoteAddress" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the remote client address.]]>
</doc>
</method>
<method name="getRemoteIp" return="java.net.InetAddress"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isWebHdfsInvocation" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns true if a WebHdfs request is in progress. Akin to
{@link Server#isRpcInvocation()}.]]>
</doc>
</method>
<method name="putRoot" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.PutOpParam"/>
<param name="destination" type="org.apache.hadoop.hdfs.web.resources.DestinationParam"/>
<param name="owner" type="org.apache.hadoop.hdfs.web.resources.OwnerParam"/>
<param name="group" type="org.apache.hadoop.hdfs.web.resources.GroupParam"/>
<param name="permission" type="org.apache.hadoop.hdfs.web.resources.PermissionParam"/>
<param name="overwrite" type="org.apache.hadoop.hdfs.web.resources.OverwriteParam"/>
<param name="bufferSize" type="org.apache.hadoop.hdfs.web.resources.BufferSizeParam"/>
<param name="replication" type="org.apache.hadoop.hdfs.web.resources.ReplicationParam"/>
<param name="blockSize" type="org.apache.hadoop.hdfs.web.resources.BlockSizeParam"/>
<param name="modificationTime" type="org.apache.hadoop.hdfs.web.resources.ModificationTimeParam"/>
<param name="accessTime" type="org.apache.hadoop.hdfs.web.resources.AccessTimeParam"/>
<param name="renameOptions" type="org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam"/>
<param name="createParent" type="org.apache.hadoop.hdfs.web.resources.CreateParentParam"/>
<param name="delegationTokenArgument" type="org.apache.hadoop.hdfs.web.resources.TokenArgumentParam"/>
<param name="aclPermission" type="org.apache.hadoop.hdfs.web.resources.AclPermissionParam"/>
<param name="xattrName" type="org.apache.hadoop.hdfs.web.resources.XAttrNameParam"/>
<param name="xattrValue" type="org.apache.hadoop.hdfs.web.resources.XAttrValueParam"/>
<param name="xattrSetFlag" type="org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam"/>
<param name="snapshotName" type="org.apache.hadoop.hdfs.web.resources.SnapshotNameParam"/>
<param name="oldSnapshotName" type="org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam"/>
<param name="excludeDatanodes" type="org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP PUT request for the root.]]>
</doc>
</method>
<method name="put" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="path" type="org.apache.hadoop.hdfs.web.resources.UriFsPathParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.PutOpParam"/>
<param name="destination" type="org.apache.hadoop.hdfs.web.resources.DestinationParam"/>
<param name="owner" type="org.apache.hadoop.hdfs.web.resources.OwnerParam"/>
<param name="group" type="org.apache.hadoop.hdfs.web.resources.GroupParam"/>
<param name="permission" type="org.apache.hadoop.hdfs.web.resources.PermissionParam"/>
<param name="overwrite" type="org.apache.hadoop.hdfs.web.resources.OverwriteParam"/>
<param name="bufferSize" type="org.apache.hadoop.hdfs.web.resources.BufferSizeParam"/>
<param name="replication" type="org.apache.hadoop.hdfs.web.resources.ReplicationParam"/>
<param name="blockSize" type="org.apache.hadoop.hdfs.web.resources.BlockSizeParam"/>
<param name="modificationTime" type="org.apache.hadoop.hdfs.web.resources.ModificationTimeParam"/>
<param name="accessTime" type="org.apache.hadoop.hdfs.web.resources.AccessTimeParam"/>
<param name="renameOptions" type="org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam"/>
<param name="createParent" type="org.apache.hadoop.hdfs.web.resources.CreateParentParam"/>
<param name="delegationTokenArgument" type="org.apache.hadoop.hdfs.web.resources.TokenArgumentParam"/>
<param name="aclPermission" type="org.apache.hadoop.hdfs.web.resources.AclPermissionParam"/>
<param name="xattrName" type="org.apache.hadoop.hdfs.web.resources.XAttrNameParam"/>
<param name="xattrValue" type="org.apache.hadoop.hdfs.web.resources.XAttrValueParam"/>
<param name="xattrSetFlag" type="org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam"/>
<param name="snapshotName" type="org.apache.hadoop.hdfs.web.resources.SnapshotNameParam"/>
<param name="oldSnapshotName" type="org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam"/>
<param name="excludeDatanodes" type="org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP PUT request.]]>
</doc>
</method>
<method name="postRoot" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.PostOpParam"/>
<param name="concatSrcs" type="org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam"/>
<param name="bufferSize" type="org.apache.hadoop.hdfs.web.resources.BufferSizeParam"/>
<param name="excludeDatanodes" type="org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam"/>
<param name="newLength" type="org.apache.hadoop.hdfs.web.resources.NewLengthParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP POST request for the root.]]>
</doc>
</method>
<method name="post" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="path" type="org.apache.hadoop.hdfs.web.resources.UriFsPathParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.PostOpParam"/>
<param name="concatSrcs" type="org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam"/>
<param name="bufferSize" type="org.apache.hadoop.hdfs.web.resources.BufferSizeParam"/>
<param name="excludeDatanodes" type="org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam"/>
<param name="newLength" type="org.apache.hadoop.hdfs.web.resources.NewLengthParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP POST request.]]>
</doc>
</method>
<method name="getRoot" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.GetOpParam"/>
<param name="offset" type="org.apache.hadoop.hdfs.web.resources.OffsetParam"/>
<param name="length" type="org.apache.hadoop.hdfs.web.resources.LengthParam"/>
<param name="renewer" type="org.apache.hadoop.hdfs.web.resources.RenewerParam"/>
<param name="bufferSize" type="org.apache.hadoop.hdfs.web.resources.BufferSizeParam"/>
<param name="xattrNames" type="java.util.List"/>
<param name="xattrEncoding" type="org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam"/>
<param name="excludeDatanodes" type="org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam"/>
<param name="fsAction" type="org.apache.hadoop.hdfs.web.resources.FsActionParam"/>
<param name="tokenKind" type="org.apache.hadoop.hdfs.web.resources.TokenKindParam"/>
<param name="tokenService" type="org.apache.hadoop.hdfs.web.resources.TokenServiceParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP GET request for the root.]]>
</doc>
</method>
<method name="get" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="path" type="org.apache.hadoop.hdfs.web.resources.UriFsPathParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.GetOpParam"/>
<param name="offset" type="org.apache.hadoop.hdfs.web.resources.OffsetParam"/>
<param name="length" type="org.apache.hadoop.hdfs.web.resources.LengthParam"/>
<param name="renewer" type="org.apache.hadoop.hdfs.web.resources.RenewerParam"/>
<param name="bufferSize" type="org.apache.hadoop.hdfs.web.resources.BufferSizeParam"/>
<param name="xattrNames" type="java.util.List"/>
<param name="xattrEncoding" type="org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam"/>
<param name="excludeDatanodes" type="org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam"/>
<param name="fsAction" type="org.apache.hadoop.hdfs.web.resources.FsActionParam"/>
<param name="tokenKind" type="org.apache.hadoop.hdfs.web.resources.TokenKindParam"/>
<param name="tokenService" type="org.apache.hadoop.hdfs.web.resources.TokenServiceParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP GET request.]]>
</doc>
</method>
<method name="deleteRoot" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.DeleteOpParam"/>
<param name="recursive" type="org.apache.hadoop.hdfs.web.resources.RecursiveParam"/>
<param name="snapshotName" type="org.apache.hadoop.hdfs.web.resources.SnapshotNameParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP DELETE request for the root.]]>
</doc>
</method>
<method name="delete" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="delegation" type="org.apache.hadoop.hdfs.web.resources.DelegationParam"/>
<param name="username" type="org.apache.hadoop.hdfs.web.resources.UserParam"/>
<param name="doAsUser" type="org.apache.hadoop.hdfs.web.resources.DoAsParam"/>
<param name="path" type="org.apache.hadoop.hdfs.web.resources.UriFsPathParam"/>
<param name="op" type="org.apache.hadoop.hdfs.web.resources.DeleteOpParam"/>
<param name="recursive" type="org.apache.hadoop.hdfs.web.resources.RecursiveParam"/>
<param name="snapshotName" type="org.apache.hadoop.hdfs.web.resources.SnapshotNameParam"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Handle HTTP DELETE request.]]>
</doc>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[WebHDFS NameNode implementation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods -->
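  <!-- A client-side sketch of the operations the handlers above serve, driven
       through the webhdfs:// FileSystem scheme rather than raw HTTP. The host
       name and port are placeholders for an actual NameNode HTTP address.

       import java.net.URI;
       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.fs.FileStatus;
       import org.apache.hadoop.fs.FileSystem;
       import org.apache.hadoop.fs.Path;

       public class WebHdfsClientSketch {
         public static void main(String[] args) throws Exception {
           Configuration conf = new Configuration();
           FileSystem fs = FileSystem.get(
               URI.create("webhdfs://namenode.example.com:50070"), conf);

           Path dir = new Path("/tmp/webhdfs-sketch");
           fs.mkdirs(dir);                               // served by an HTTP PUT (MKDIRS)
           for (FileStatus stat : fs.listStatus(dir)) {  // served by an HTTP GET (LISTSTATUS)
             System.out.println(stat.getPath());
           }
           fs.delete(dir, true);                         // served by an HTTP DELETE (DELETE)
           fs.close();
         }
       }
  -->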
</package>
<package name="org.apache.hadoop.hdfs.server.protocol">
<!-- start class org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand -->
<class name="BalancerBandwidthCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BalancerBandwidthCommand" type="long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Balancer Bandwidth Command constructor.
@param bandwidth Balancer bandwidth in bytes per second.]]>
</doc>
</constructor>
<method name="getBalancerBandwidthValue" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get current value of the max balancer bandwidth in bytes per second.
@return Balancer bandwidth in bytes per second for this datanode.]]>
</doc>
</method>
<doc>
<![CDATA[Balancer bandwidth command instructs each datanode to change its value for
the max amount of network bandwidth it may use during the block balancing
operation.
The Balancer Bandwidth Command contains the new bandwidth value as its
payload. The bandwidth value is in bytes per second.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand -->
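  <!-- A minimal sketch: the command simply carries the new bandwidth value, in
       bytes per second, as its payload.

       import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;

       public class BalancerBandwidthSketch {
         public static void main(String[] args) {
           long bytesPerSecond = 10L * 1024 * 1024;   // example value: 10 MiB/s
           BalancerBandwidthCommand cmd = new BalancerBandwidthCommand(bytesPerSecond);
           System.out.println("balancer bandwidth = " + cmd.getBalancerBandwidthValue());
         }
       }
  -->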
<!-- start class org.apache.hadoop.hdfs.server.protocol.BlockReportContext -->
<class name="BlockReportContext" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockReportContext" type="int, int, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getTotalRpcs" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getCurRpc" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getReportId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The context of the block report.
This is a set of fields that the Datanode sends to provide context about a
block report RPC. The context includes a unique 64-bit ID which
identifies the block report as a whole. It also includes the total number
of RPCs which this block report is split into, and the index into that
total for the current RPC.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.BlockReportContext -->
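  <!-- A sketch of splitting one logical block report into several RPCs that share
       a report ID. The constructor argument order (totalRpcs, curRpc, reportId)
       is assumed from the getter names listed above.

       import java.util.concurrent.ThreadLocalRandom;
       import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;

       public class BlockReportContextSketch {
         public static void main(String[] args) {
           long reportId = ThreadLocalRandom.current().nextLong();
           int totalRpcs = 3;
           for (int curRpc = 0; curRpc < totalRpcs; curRpc++) {
             BlockReportContext ctx = new BlockReportContext(totalRpcs, curRpc, reportId);
             System.out.println("report " + ctx.getReportId()
                 + " rpc " + ctx.getCurRpc() + " of " + ctx.getTotalRpcs());
           }
         }
       }
  -->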
<!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage -->
<class name="DatanodeStorage" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DatanodeStorage" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}.]]>
</doc>
</constructor>
<constructor name="DatanodeStorage" type="java.lang.String, org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State, org.apache.hadoop.fs.StorageType"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStorageID" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getState" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStorageType" return="org.apache.hadoop.fs.StorageType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="generateUuid" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Generate new storage ID. The format of this string can be changed
in the future without requiring that old storage IDs be updated.
@return unique storage ID]]>
</doc>
</method>
<method name="isValidStorageId" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="storageID" type="java.lang.String"/>
<doc>
<![CDATA[Verify that a given string is a storage ID in the "DS-..uuid.." format.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="other" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Class captures information of a storage in Datanode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage -->
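  <!-- A sketch using the listed static helpers and the single-argument
       constructor, which the Javadoc above documents as producing a storage in
       State.NORMAL with StorageType.DEFAULT.

       import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

       public class DatanodeStorageSketch {
         public static void main(String[] args) {
           String id = DatanodeStorage.generateUuid();   // new "DS-..." style storage ID
           System.out.println("valid? " + DatanodeStorage.isValidStorageId(id));

           DatanodeStorage storage = new DatanodeStorage(id);
           System.out.println(storage.getStorageID() + " state=" + storage.getState()
               + " type=" + storage.getStorageType());
         }
       }
  -->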
<!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State -->
<class name="DatanodeStorage.State" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[The state of the storage.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport -->
<class name="DatanodeStorageReport" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DatanodeStorageReport" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo, org.apache.hadoop.hdfs.server.protocol.StorageReport[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getDatanodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStorageReports" return="org.apache.hadoop.hdfs.server.protocol.StorageReport[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Class captures information of a datanode and its storages.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.FencedException -->
<class name="FencedException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FencedException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Thrown when a previous user of a shared resource tries to use the
resource again after it has been fenced by another user.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.FencedException -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability -->
<class name="NamespaceInfo.Capability" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getMask" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo -->
<class name="ReceivedDeletedBlockInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReceivedDeletedBlockInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ReceivedDeletedBlockInfo" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
</method>
<method name="getDelHints" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setDelHints"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="hints" type="java.lang.String"/>
</method>
<method name="getStatus" return="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="blockEquals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
</method>
<method name="isDeletedBlock" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A data structure to store the blocks in an incremental block report.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo -->
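  <!-- A minimal sketch around the no-argument constructor and the setters listed
       above. The default-constructed Block and the hint string are placeholder
       values, not a realistic report entry.

       import org.apache.hadoop.hdfs.protocol.Block;
       import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;

       public class ReceivedDeletedBlockInfoSketch {
         public static void main(String[] args) {
           ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo();
           info.setBlock(new Block());                // placeholder block
           info.setDelHints("example-del-hint");      // placeholder deletion hint
           System.out.println(info.getBlock() + " hints=" + info.getDelHints());
         }
       }
  -->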
<!-- start class org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus -->
<class name="ReceivedDeletedBlockInfo.BlockStatus" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="fromCode" return="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="code" type="int"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.RemoteEditLog -->
<class name="RemoteEditLog" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.lang.Comparable"/>
<constructor name="RemoteEditLog"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="RemoteEditLog" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="RemoteEditLog" type="long, long, boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStartTxId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getEndTxId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isInProgress" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="log" type="org.apache.hadoop.hdfs.server.protocol.RemoteEditLog"/>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="GET_START_TXID" type="com.google.common.base.Function"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Guava <code>Function</code> which applies {@link #getStartTxId()}]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.RemoteEditLog -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest -->
<class name="RemoteEditLogManifest" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="RemoteEditLogManifest"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="RemoteEditLogManifest" type="java.util.List"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getLogs" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[An enumeration of logs available on a remote NameNode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest -->
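  <!-- A sketch that builds a manifest from two contiguous RemoteEditLog segments.
       The two-argument constructor order (startTxId, endTxId) is assumed from
       the getters listed above.

       import java.util.ArrayList;
       import java.util.Collections;
       import java.util.List;
       import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
       import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

       public class EditLogManifestSketch {
         public static void main(String[] args) {
           List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
           logs.add(new RemoteEditLog(101L, 200L));
           logs.add(new RemoteEditLog(1L, 100L));
           Collections.sort(logs);                    // RemoteEditLog is Comparable

           RemoteEditLogManifest manifest = new RemoteEditLogManifest(logs);
           for (RemoteEditLog log : manifest.getLogs()) {
             System.out.println(log.getStartTxId() + " to " + log.getEndTxId());
           }
         }
       }
  -->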
<!-- start class org.apache.hadoop.hdfs.server.protocol.StorageBlockReport -->
<class name="StorageBlockReport" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="StorageBlockReport" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage, org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStorage" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Block report for a Datanode storage]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.StorageBlockReport -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks -->
<class name="StorageReceivedDeletedBlocks" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="StorageReceivedDeletedBlocks" type="java.lang.String, org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="StorageReceivedDeletedBlocks" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage, org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStorageID" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStorage" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Report of blocks received and deleted per Datanode
storage.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks -->
<!-- start class org.apache.hadoop.hdfs.server.protocol.StorageReport -->
<class name="StorageReport" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="StorageReport" type="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage, boolean, long, long, long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getStorage" return="org.apache.hadoop.hdfs.server.protocol.DatanodeStorage"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isFailed" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getCapacity" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDfsUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRemaining" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBlockPoolUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.server.protocol.StorageReport[]"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Utilization report for a Datanode storage]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.StorageReport -->
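  <!-- A sketch of the six-argument constructor; the argument order (storage,
       failed, capacity, dfsUsed, remaining, blockPoolUsed) is assumed from the
       getter names above, and the sizes are made-up example values.

       import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
       import org.apache.hadoop.hdfs.server.protocol.StorageReport;

       public class StorageReportSketch {
         public static void main(String[] args) {
           DatanodeStorage storage = new DatanodeStorage(DatanodeStorage.generateUuid());
           long capacity = 500L * 1024 * 1024 * 1024;   // example: 500 GiB volume
           long dfsUsed = 120L * 1024 * 1024 * 1024;
           long remaining = capacity - dfsUsed;
           long blockPoolUsed = dfsUsed;

           StorageReport report = new StorageReport(
               storage, false, capacity, dfsUsed, remaining, blockPoolUsed);
           System.out.println(report.getStorage().getStorageID()
               + " failed=" + report.isFailed()
               + " remaining=" + report.getRemaining());
         }
       }
  -->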
<!-- start class org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary -->
<class name="VolumeFailureSummary" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="VolumeFailureSummary" type="java.lang.String[], long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Creates a new VolumeFailureSummary.
@param failedStorageLocations storage locations that have failed
@param lastVolumeFailureDate date/time of last volume failure in
milliseconds since epoch
@param estimatedCapacityLostTotal estimate of capacity lost in bytes]]>
</doc>
</constructor>
<method name="getFailedStorageLocations" return="java.lang.String[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns each storage location that has failed, sorted.
@return each storage location that has failed, sorted]]>
</doc>
</method>
<method name="getLastVolumeFailureDate" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns the date/time of the last volume failure in milliseconds since
epoch.
@return date/time of last volume failure in milliseconds since epoch]]>
</doc>
</method>
<method name="getEstimatedCapacityLostTotal" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns an estimate of the capacity lost. This is only an estimate, because
in some cases it is impossible to know the capacity of the volume, for example
if there was never a chance to query its capacity before the failure occurred.
@return estimate of capacity lost in bytes]]>
</doc>
</method>
<doc>
<![CDATA[Summarizes information about data volume failures on a DataNode.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary -->
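  <!-- A sketch built directly from the documented constructor parameters; the
       paths and sizes are example values only.

       import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;

       public class VolumeFailureSummarySketch {
         public static void main(String[] args) {
           String[] failedLocations = { "/data/1/dfs/dn", "/data/2/dfs/dn" };
           long lastFailureMs = System.currentTimeMillis();
           long estimatedBytesLost = 2L * 1024 * 1024 * 1024;   // example: 2 GiB

           VolumeFailureSummary summary = new VolumeFailureSummary(
               failedLocations, lastFailureMs, estimatedBytesLost);
           System.out.println(summary.getFailedStorageLocations().length
               + " failed volumes, approx " + summary.getEstimatedCapacityLostTotal()
               + " bytes lost, last at " + summary.getLastVolumeFailureDate());
         }
       }
  -->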
</package>
<package name="org.apache.hadoop.hdfs.shortcircuit">
<!-- start class org.apache.hadoop.hdfs.shortcircuit.DfsClientShm -->
<class name="DfsClientShm" extends="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.net.unix.DomainSocketWatcher.Handler"/>
<method name="getEndpointShmManager" return="org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.EndpointShmManager"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPeer" return="org.apache.hadoop.hdfs.net.DomainPeer"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isDisconnected" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Determine if the shared memory segment is disconnected from the DataNode.
This must be called with the DfsClientShmManager lock held.
@return True if the shared memory segment is stale.]]>
</doc>
</method>
<method name="handle" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="sock" type="org.apache.hadoop.net.unix.DomainSocket"/>
<doc>
<![CDATA[Handle the closure of the UNIX domain socket associated with this shared
memory segment by marking this segment as stale.
If there are no slots associated with this shared memory segment, it will
be freed immediately in this function.]]>
</doc>
</method>
<doc>
<![CDATA[DfsClientShm is a subclass of ShortCircuitShm which is used by the
DfsClient.
When the UNIX domain socket associated with this shared memory segment
closes unexpectedly, we mark the slots inside this segment as disconnected.
ShortCircuitReplica objects that contain disconnected slots are stale,
and will not be used to service new reads or mmap operations.
However, in-progress read or mmap operations will continue to proceed.
Once the last slot is deallocated, the segment can be safely munmapped.
Slots may also become stale because the associated replica has been deleted
on the DataNode. In this case, the DataNode will clear the 'valid' bit.
The client will then see these slots as stale (see
{@link ShortCircuitReplica#isStale}).]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.DfsClientShm -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo -->
<class name="DfsClientShmManager.PerDatanodeVisitorInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<field name="full" type="java.util.TreeMap"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="notFull" type="java.util.TreeMap"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="disabled" type="boolean"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo -->
<!-- start interface org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor -->
<interface name="DfsClientShmManager.Visitor" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="visit"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="java.util.HashMap"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
</interface>
<!-- end interface org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory -->
<class name="DomainSocketFactory" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DomainSocketFactory" type="org.apache.hadoop.hdfs.DFSClient.Conf"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPathInfo" return="org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="addr" type="java.net.InetSocketAddress"/>
<param name="conf" type="org.apache.hadoop.hdfs.DFSClient.Conf"/>
<doc>
<![CDATA[Get information about a domain socket path.
@param addr The inet address to use.
@param conf The client configuration.
@return Information about the socket path.]]>
</doc>
</method>
<method name="createSocket" return="org.apache.hadoop.net.unix.DomainSocket"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathInfo"/>
<param name="socketTimeout" type="int"/>
</method>
<method name="disableShortCircuitForPath"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="disableDomainSocketPath"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="clearPathMap"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathInfo -->
<class name="DomainSocketFactory.PathInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPathState" return="org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathInfo -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathState -->
<class name="DomainSocketFactory.PathState" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathState[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathState"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getUsableForDataTransfer" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getUsableForShortCircuit" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory.PathState -->
<!-- start interface org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor -->
<interface name="ShortCircuitCache.CacheVisitor" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="visit"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="numOutstandingMmaps" type="int"/>
<param name="replicas" type="java.util.Map"/>
<param name="failedLoads" type="java.util.Map"/>
<param name="evictable" type="java.util.Map"/>
<param name="evictableMmapped" type="java.util.Map"/>
</method>
</interface>
<!-- end interface org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor -->
<!-- start interface org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplicaCreator -->
<interface name="ShortCircuitCache.ShortCircuitReplicaCreator" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="createShortCircuitReplicaInfo" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Attempt to create a ShortCircuitReplica object.
This callback will be made without holding any locks.
@return a non-null ShortCircuitReplicaInfo object.]]>
</doc>
</method>
</interface>
<!-- end interface org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplicaCreator -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo -->
<class name="ShortCircuitReplicaInfo" extends="java.lang.Object"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="ShortCircuitReplicaInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ShortCircuitReplicaInfo" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ShortCircuitReplicaInfo" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getReplica" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getInvalidTokenException" return="org.apache.hadoop.security.token.SecretManager.InvalidToken"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm -->
<class name="ShortCircuitShm" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ShortCircuitShm" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId, java.io.FileInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create the ShortCircuitShm.
@param shmId The ID to use.
@param stream The stream that we're going to use to create this
shared memory segment.
Although this is a FileInputStream, we are going to
assume that the underlying file descriptor is writable
as well as readable. It would be more appropriate to use
a RandomAccessFile here, but that class does not have
any public accessor which returns a FileDescriptor,
unlike FileInputStream.]]>
</doc>
</constructor>
<method name="getShmId" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</method>
<method name="isEmpty" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Determine if this shared memory object is empty.
@return True if the shared memory object is empty.]]>
</doc>
</method>
<method name="isFull" return="boolean"
abstract="false" native="false" synchronized="true"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Determine if this shared memory object is full.
@return True if the shared memory object is full.]]>
</doc>
</method>
<method name="allocAndRegisterSlot" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot"
abstract="false" native="false" synchronized="true"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
<doc>
<![CDATA[Allocate a new slot and register it.
This function chooses an empty slot, initializes it, and then returns
the relevant Slot object.
@return The new slot.]]>
</doc>
</method>
<method name="getSlot" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot"
abstract="false" native="false" synchronized="true"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="slotIdx" type="int"/>
<exception name="InvalidRequestException" type="org.apache.hadoop.fs.InvalidRequestException"/>
</method>
<method name="registerSlot" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot"
abstract="false" native="false" synchronized="true"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="slotIdx" type="int"/>
<param name="blockId" type="org.apache.hadoop.hdfs.ExtendedBlockId"/>
<exception name="InvalidRequestException" type="org.apache.hadoop.fs.InvalidRequestException"/>
<doc>
<![CDATA[Register a slot.
This function looks at a slot which has already been initialized (by
another process), and registers it with us. Then, it returns the
relevant Slot object.
@return The slot.
@throws InvalidRequestException
If the slot index we're trying to allocate has not been
initialized, or is already in use.]]>
</doc>
</method>
<method name="unregisterSlot"
abstract="false" native="false" synchronized="true"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="slotIdx" type="int"/>
<doc>
<![CDATA[Unregisters a slot.
This doesn't alter the contents of the slot. It just means that this
ShortCircuitShm object no longer tracks the slot as registered.
@param slotIdx Index of the slot to unregister.]]>
</doc>
</method>
<method name="slotIterator" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Iterate over all allocated slots.
Note that this method isn't safe if the set of allocated slots is being
modified concurrently.
@return The slot iterator.]]>
</doc>
</method>
<method name="free"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="BYTES_PER_SLOT" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A shared memory segment used to implement short-circuit reads.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm -->
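<!-- Hand-written sketch of creating a ShortCircuitShm from an ordinary file,
     using only the constructor and methods listed above. It assumes a POSIX
     platform with the Hadoop native library available (the segment is mmapped);
     the temp file and its size are illustrative only.

     import java.io.File;
     import java.io.FileInputStream;
     import java.io.RandomAccessFile;
     import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
     import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;

     public class ShmSketch {
       public static void main(String[] args) throws Exception {
         // Back the segment with a temp file sized to hold a number of slots.
         File file = File.createTempFile("shm", ".tmp");
         file.deleteOnExit();
         RandomAccessFile raf = new RandomAccessFile(file, "rw");
         raf.setLength(8192);
         raf.close();

         FileInputStream stream = new FileInputStream(file);
         ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
         System.out.println("created " + shm.getShmId()
             + " empty=" + shm.isEmpty() + " full=" + shm.isFull());
         shm.free();        // release the mapped segment when done
         stream.close();
       }
     }
-->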
<!-- start class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId -->
<class name="ShortCircuitShm.ShmId" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.lang.Comparable"/>
<constructor name="ShmId" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="createRandom" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Generate a random ShmId.
We generate ShmIds randomly to prevent a malicious client from
successfully guessing one and using that to interfere with another
client.]]>
</doc>
</method>
<method name="getHi" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLo" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="other" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"/>
</method>
<doc>
<![CDATA[Identifies a DfsClientShm.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot -->
<class name="ShortCircuitShm.Slot" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getShm" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the short-circuit memory segment associated with this Slot.
@return The enclosing short-circuit memory segment.]]>
</doc>
</method>
<method name="getBlockId" return="org.apache.hadoop.hdfs.ExtendedBlockId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the ExtendedBlockId associated with this slot.
@return The ExtendedBlockId of this slot.]]>
</doc>
</method>
<method name="getSlotId" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the SlotId of this slot, containing both shmId and slotIdx.
@return The SlotId of this slot.]]>
</doc>
</method>
<method name="getSlotIdx" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the Slot index.
@return The index of this slot.]]>
</doc>
</method>
<method name="isValid" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="makeValid"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="makeInvalid"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isAnchorable" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="makeAnchorable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="makeUnanchorable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isAnchored" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="addAnchor" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Try to add an anchor for a given slot.
When a slot is anchored, we know that the block it refers to is resident
in memory.
@return True if the slot is anchored.]]>
</doc>
</method>
<method name="removeAnchor"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Remove an anchor for a given slot.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A slot containing information about a replica.
The format is:
word 0
bit 0:32 Slot flags (see below).
bit 33:63 Anchor count.
word 1:7
Reserved for future use, such as statistics.
Padding is also useful for avoiding false sharing.
Little-endian versus big-endian is not relevant here since both the client
and the server reside on the same computer and therefore use the same byte order.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot -->
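<!-- A compile-only helper sketch for the Slot anchoring methods listed above.
     Slots cannot be constructed directly; this assumes the Slot was obtained
     from ShortCircuitShm.allocAndRegisterSlot, and the helper name is hypothetical.

     import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;

     public class SlotAnchorSketch {
       // Run some work while the slot is anchored (per the description above,
       // an anchored slot's block is resident in memory), then release the anchor.
       static void withAnchor(Slot slot, Runnable work) {
         if (slot.isValid() && slot.isAnchorable() && slot.addAnchor()) {
           try {
             work.run();
           } finally {
             slot.removeAnchor();
           }
         } else {
           work.run();     // fall back: no anchor could be taken
         }
       }
     }
-->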
<!-- start class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId -->
<class name="ShortCircuitShm.SlotId" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SlotId" type="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId, int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getShmId" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSlotIdx" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Uniquely identifies a slot.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId -->
<!-- start class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotIterator -->
<class name="ShortCircuitShm.SlotIterator" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.util.Iterator"/>
<constructor name="SlotIterator"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="hasNext" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="next" return="org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="remove"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotIterator -->
</package>
<package name="org.apache.hadoop.hdfs.tools">
<!-- start class org.apache.hadoop.hdfs.tools.AdminHelper -->
<class name="AdminHelper" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="AdminHelper"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Helper methods for CacheAdmin/CryptoAdmin/StoragePolicyAdmin]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.AdminHelper -->
<!-- start class org.apache.hadoop.hdfs.tools.DFSHAAdmin -->
<class name="DFSHAAdmin" extends="org.apache.hadoop.ha.HAAdmin"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DFSHAAdmin"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setErrOut"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="errOut" type="java.io.PrintStream"/>
</method>
<method name="setOut"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="out" type="java.io.PrintStream"/>
</method>
<method name="setConf"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
</method>
<method name="addSecurityConfiguration" return="org.apache.hadoop.conf.Configuration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Add the requisite security principal settings to the given Configuration,
returning a copy.
@param conf the original config
@return a copy with the security settings added]]>
</doc>
</method>
<method name="resolveTarget" return="org.apache.hadoop.ha.HAServiceTarget"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="nnId" type="java.lang.String"/>
<doc>
<![CDATA[Try to map the given namenode ID to its service address.]]>
</doc>
</method>
<method name="getUsageString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</method>
<method name="runCmd" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="argv" type="java.lang.String[]"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="getTargetIds" return="java.util.Collection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="namenodeToActivate" type="java.lang.String"/>
<doc>
<![CDATA[Returns the list of all namenode IDs for the given configuration.]]>
</doc>
</method>
<method name="main"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="argv" type="java.lang.String[]"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<doc>
<![CDATA[Class to extend HAAdmin to do a little bit of HDFS-specific configuration.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.DFSHAAdmin -->
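<!-- Hand-written sketch of driving DFSHAAdmin programmatically. It relies on
     DFSHAAdmin being a Tool (via HAAdmin) and on the addSecurityConfiguration
     method listed above; "nn1" is a placeholder namenode ID and the command
     assumes an HA-enabled configuration.

     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
     import org.apache.hadoop.util.ToolRunner;

     public class HaAdminSketch {
       public static void main(String[] args) throws Exception {
         // Copy the config with the HDFS security principal settings added.
         Configuration conf = DFSHAAdmin.addSecurityConfiguration(new Configuration());
         int exitCode = ToolRunner.run(conf, new DFSHAAdmin(),
             new String[] {"-getServiceState", "nn1"});
         System.exit(exitCode);
       }
     }
-->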
<!-- start class org.apache.hadoop.hdfs.tools.GetConf -->
<class name="GetConf" extends="org.apache.hadoop.conf.Configured"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.util.Tool"/>
<method name="run" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="args" type="java.lang.String[]"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="main"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="args" type="java.lang.String[]"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<doc>
<![CDATA[Tool for getting configuration information from a configuration file.
Adding more options:
<ul>
<li>
If adding a simple option to get a value corresponding to a key in the
configuration, use regular {@link GetConf.CommandHandler}.
See {@link GetConf.Command#EXCLUDE_FILE} example.
</li>
<li>
If adding an option that does not return a value for a key, add
a subclass of {@link GetConf.CommandHandler} and set it up in
{@link GetConf.Command}.
See {@link GetConf.Command#NAMENODE} for an example.
For the newly added option, add a map entry with the corresponding
{@link GetConf.CommandHandler}.
</ul>]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.GetConf -->
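<!-- Hand-written sketch showing the programmatic equivalent of the command line
     "hdfs getconf -namenodes". It assumes the hadoop-hdfs 2.7.2 jar and a loadable
     configuration on the classpath; GetConf.main prints the result and exits.

     import org.apache.hadoop.hdfs.tools.GetConf;

     public class GetConfSketch {
       public static void main(String[] args) throws Exception {
         // Prints the namenode host names from the configuration.
         GetConf.main(new String[] {"-namenodes"});
       }
     }
-->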
<!-- start class org.apache.hadoop.hdfs.tools.StoragePolicyAdmin -->
<class name="StoragePolicyAdmin" extends="org.apache.hadoop.conf.Configured"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.util.Tool"/>
<constructor name="StoragePolicyAdmin" type="org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="main"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="argsArray" type="java.lang.String[]"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<method name="run" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="args" type="java.lang.String[]"/>
<exception name="Exception" type="java.lang.Exception"/>
</method>
<doc>
<![CDATA[This class implements block storage policy operations.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.StoragePolicyAdmin -->
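<!-- Hand-written sketch of using StoragePolicyAdmin with the constructor and
     run method listed above. The "-listPolicies" argument mirrors the
     "hdfs storagepolicies" shell command and assumes a reachable cluster.

     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.tools.StoragePolicyAdmin;

     public class StoragePolicySketch {
       public static void main(String[] args) throws Exception {
         StoragePolicyAdmin admin = new StoragePolicyAdmin(new Configuration());
         // List the storage policies known to the cluster.
         int exitCode = admin.run(new String[] {"-listPolicies"});
         System.exit(exitCode);
       }
     }
-->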
</package>
<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
<!-- start class org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags -->
<class name="OfflineEditsViewer.Flags" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Flags"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPrintToScreen" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setPrintToScreen"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFixTxIds" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setFixTxIds"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRecoveryMode" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setRecoveryMode"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags -->
<!-- start class org.apache.hadoop.hdfs.tools.offlineEditsViewer.TeeOutputStream -->
<class name="TeeOutputStream" extends="java.io.OutputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="TeeOutputStream" type="java.io.OutputStream[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="byte[]"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="byte[]"/>
<param name="off" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="flush"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[A TeeOutputStream writes its output to multiple output streams.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.offlineEditsViewer.TeeOutputStream -->
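<!-- Hand-written usage sketch for TeeOutputStream, using only the constructor and
     methods listed above; the buffers and message are illustrative.

     import java.io.ByteArrayOutputStream;
     import java.io.OutputStream;
     import java.nio.charset.StandardCharsets;
     import org.apache.hadoop.hdfs.tools.offlineEditsViewer.TeeOutputStream;

     public class TeeSketch {
       public static void main(String[] args) throws Exception {
         ByteArrayOutputStream a = new ByteArrayOutputStream();
         ByteArrayOutputStream b = new ByteArrayOutputStream();
         // Every byte written to the tee goes to both underlying streams.
         TeeOutputStream tee = new TeeOutputStream(new OutputStream[] {a, b});
         tee.write("hello tee\n".getBytes(StandardCharsets.UTF_8));
         tee.flush();
         tee.close();
         System.out.println("a=" + a.toString("UTF-8") + "b=" + b.toString("UTF-8"));
       }
     }
-->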
</package>
<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
<!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageDelimitedTextWriter -->
<class name="PBImageDelimitedTextWriter" extends="org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageTextWriter"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getEntry" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="parent" type="java.lang.String"/>
<param name="inode" type="org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode"/>
</method>
<doc>
<![CDATA[A PBImageDelimitedTextWriter generates a text representation of the PB fsimage,
with each element separated by a delimiter string. All of the elements
common to both inodes and inodes-under-construction are included. When
processing an fsimage with a layout version that did not include an
element, such as AccessTime, the output file will include a column
for the value, but no value will be included.
Individual block information for each file is not currently included.
The default delimiter is tab, as this is an unlikely value to be included in
an inode path or other text metadata. The delimiter value can be changed via
the constructor.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageDelimitedTextWriter -->
<!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.WebImageViewer -->
<class name="WebImageViewer" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Closeable"/>
<constructor name="WebImageViewer" type="java.net.InetSocketAddress"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="start"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fsimage" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Start WebImageViewer and wait until the thread is interrupted.
@param fsimage the fsimage to load.
@throws IOException if it fails to load the fsimage.]]>
</doc>
</method>
<method name="initServer"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="fsimage" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Start WebImageViewer.
@param fsimage the fsimage to load.
@throws IOException if it fails to load the fsimage.]]>
</doc>
</method>
<method name="getPort" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the listening port.
@return the port WebImageViewer is listening on]]>
</doc>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[WebImageViewer loads an fsimage and exposes a read-only WebHDFS API for its
namespace.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.WebImageViewer -->
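<!-- Hand-written sketch of serving a saved fsimage over read-only WebHDFS with
     WebImageViewer, using only the constructor and methods listed above. The
     fsimage path is a placeholder.

     import java.net.InetSocketAddress;
     import org.apache.hadoop.hdfs.tools.offlineImageViewer.WebImageViewer;

     public class WebImageViewerSketch {
       public static void main(String[] args) throws Exception {
         // Bind to an ephemeral local port.
         WebImageViewer viewer = new WebImageViewer(new InetSocketAddress("localhost", 0));
         try {
           viewer.initServer("/tmp/fsimage_0000000000000000042");   // placeholder path
           System.out.println("listening on port " + viewer.getPort());
           // e.g. curl http://localhost:<port>/webhdfs/v1/?op=LISTSTATUS
         } finally {
           viewer.close();
         }
       }
     }
-->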
<!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor -->
<class name="XmlImageVisitor" extends="org.apache.hadoop.hdfs.tools.offlineImageViewer.TextWriterImageVisitor"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="XmlImageVisitor" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="XmlImageVisitor" type="java.lang.String, boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<doc>
<![CDATA[An XmlImageVisitor walks over an fsimage structure and writes out
an equivalent XML document that contains the fsimage's components.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor -->
</package>
<package name="org.apache.hadoop.hdfs.tools.snapshot">
</package>
<package name="org.apache.hadoop.hdfs.util">
<!-- start class org.apache.hadoop.hdfs.util.AtomicFileOutputStream -->
<class name="AtomicFileOutputStream" extends="java.io.FilterOutputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="AtomicFileOutputStream" type="java.io.File"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
</constructor>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="abort"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Close the atomic file, but do not "commit" the temporary file
on top of the destination. This should be used if there is a failure
in writing.]]>
</doc>
</method>
<doc>
<![CDATA[A FileOutputStream that has the property that it will only show
up at its destination once it has been entirely written and flushed
to disk. While being written, it will use a .tmp suffix.
When the output stream is closed, it is flushed, fsynced, and
will be moved into place, overwriting any file that already
exists at that location.
<b>NOTE</b>: on Windows platforms, it will not atomically
replace the target file - instead the target file is deleted
before this one is moved into place.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.AtomicFileOutputStream -->
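<!-- Hand-written sketch of the write-then-commit pattern described above for
     AtomicFileOutputStream. The target path and file contents are illustrative.

     import java.io.File;
     import java.nio.charset.StandardCharsets;
     import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;

     public class AtomicWriteSketch {
       public static void main(String[] args) throws Exception {
         // The file only appears at this path once the stream is closed successfully.
         File target = new File(System.getProperty("java.io.tmpdir"), "version.txt");
         AtomicFileOutputStream out = new AtomicFileOutputStream(target);
         boolean committed = false;
         try {
           out.write("illustrative contents\n".getBytes(StandardCharsets.UTF_8));
           out.close();          // flush, fsync and move the .tmp file into place
           committed = true;
         } finally {
           if (!committed) {
             out.abort();        // discard the partially written .tmp file
           }
         }
       }
     }
-->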
<!-- start class org.apache.hadoop.hdfs.util.ByteArrayManager.Conf -->
<class name="ByteArrayManager.Conf" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Conf" type="int, int, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
</class>
<!-- end class org.apache.hadoop.hdfs.util.ByteArrayManager.Conf -->
<!-- start class org.apache.hadoop.hdfs.util.DataTransferThrottler -->
<class name="DataTransferThrottler" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DataTransferThrottler" type="long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param bandwidthPerSec bandwidth allowed in bytes per second.]]>
</doc>
</constructor>
<constructor name="DataTransferThrottler" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param period in milliseconds. Bandwidth is enforced over this
period.
@param bandwidthPerSec bandwidth allowed in bytes per second.]]>
</doc>
</constructor>
<method name="getBandwidth" return="long"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return current throttle bandwidth in bytes per second.]]>
</doc>
</method>
<method name="setBandwidth"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bytesPerSecond" type="long"/>
<doc>
<![CDATA[Sets the throttle bandwidth. This takes effect at the latest by the end of the
current period.]]>
</doc>
</method>
<method name="throttle"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="numOfBytes" type="long"/>
<doc>
<![CDATA[Given the numOfBytes sent/received since last time throttle was called,
make the current thread sleep if I/O rate is too fast
compared to the given bandwidth.
@param numOfBytes
number of bytes sent/received since last time throttle was called]]>
</doc>
</method>
<method name="throttle"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="numOfBytes" type="long"/>
<param name="canceler" type="org.apache.hadoop.hdfs.util.Canceler"/>
<doc>
<![CDATA[Given the numOfBytes sent/received since last time throttle was called,
make the current thread sleep if I/O rate is too fast
compared to the given bandwidth. Allows for optional external cancellation.
@param numOfBytes
number of bytes sent/received since last time throttle was called
@param canceler
optional canceler to check for abort of throttle]]>
</doc>
</method>
<doc>
<![CDATA[A class to throttle data transfers.
This class is thread safe. It can be shared by multiple threads.
The parameter bandwidthPerSec specifies the total bandwidth shared by all
threads.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.DataTransferThrottler -->
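<!-- Hand-written sketch of the throttling loop implied by the DataTransferThrottler
     description above. The 1 MB/s limit, buffer size and loop count are illustrative.

     import org.apache.hadoop.hdfs.util.DataTransferThrottler;

     public class ThrottleSketch {
       public static void main(String[] args) {
         // Cap a simulated transfer at roughly 1 MB per second.
         DataTransferThrottler throttler = new DataTransferThrottler(1024 * 1024);
         byte[] buffer = new byte[64 * 1024];
         for (int i = 0; i < 32; i++) {
           // ... send or receive buffer.length bytes here ...
           throttler.throttle(buffer.length);   // sleeps if the rate exceeds the cap
         }
         System.out.println("current bandwidth: " + throttler.getBandwidth() + " bytes/s");
       }
     }
-->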
<!-- start class org.apache.hadoop.hdfs.util.Diff -->
<class name="Diff" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Diff"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<constructor name="Diff" type="java.util.List, java.util.List"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<method name="search" return="int"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="protected"
deprecated="not deprecated">
<param name="elements" type="java.util.List"/>
<param name="name" type="K"/>
<doc>
<![CDATA[Search for the element in the list.
@return -1 if the list is null; otherwise, return the insertion point
defined in {@link Collections#binarySearch(List, Object)}.
Note that, when the list is null, -1 is the correct insertion point.]]>
</doc>
</method>
<method name="getList" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.util.Diff.ListType"/>
<doc>
<![CDATA[@return the created list, which is never null.]]>
</doc>
</method>
<method name="searchIndex" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.util.Diff.ListType"/>
<param name="name" type="K"/>
</method>
<method name="search" return="E"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.util.Diff.ListType"/>
<param name="name" type="K"/>
<doc>
<![CDATA[@return null if the element is not found;
otherwise, return the element in the created/deleted list.]]>
</doc>
</method>
<method name="isEmpty" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true if no changes contained in the diff]]>
</doc>
</method>
<method name="create" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="element" type="E"/>
<doc>
<![CDATA[Create an element in current state.
@return the c-list insertion point for undo.]]>
</doc>
</method>
<method name="undoCreate"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="element" type="E"/>
<param name="insertionPoint" type="int"/>
<doc>
<![CDATA[Undo the previous create(E) operation. Note that the behavior is
undefined if the previous operation is not create(E).]]>
</doc>
</method>
<method name="delete" return="org.apache.hadoop.hdfs.util.Diff.UndoInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="element" type="E"/>
<doc>
<![CDATA[Delete an element from current state.
@return the undo information.]]>
</doc>
</method>
<method name="undoDelete"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="element" type="E"/>
<param name="undoInfo" type="org.apache.hadoop.hdfs.util.Diff.UndoInfo"/>
<doc>
<![CDATA[Undo the previous delete(E) operation. Note that the behavior is
undefined if the previous operation is not delete(E).]]>
</doc>
</method>
<method name="modify" return="org.apache.hadoop.hdfs.util.Diff.UndoInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="oldElement" type="E"/>
<param name="newElement" type="E"/>
<doc>
<![CDATA[Modify an element in current state.
@return the undo information.]]>
</doc>
</method>
<method name="undoModify"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="oldElement" type="E"/>
<param name="newElement" type="E"/>
<param name="undoInfo" type="org.apache.hadoop.hdfs.util.Diff.UndoInfo"/>
<doc>
<![CDATA[Undo the previous modify(E, E) operation. Note that the behavior
is undefined if the previous operation is not modify(E, E).]]>
</doc>
</method>
<method name="accessPrevious" return="org.apache.hadoop.hdfs.util.Diff.Container"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="K"/>
<doc>
<![CDATA[Find an element in the previous state.
@return null if the element cannot be determined in the previous state
since no change is recorded and it should be determined in the
current state; otherwise, return a {@link Container} containing the
element in the previous state. Note that the element can possibly
be null which means that the element is not found in the previous
state.]]>
</doc>
</method>
<method name="accessCurrent" return="org.apache.hadoop.hdfs.util.Diff.Container"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="K"/>
<doc>
<![CDATA[Find an element in the current state.
@return null if the element cannot be determined in the current state since
no change is recorded and it should be determined in the previous
state; otherwise, return a {@link Container} containing the element in
the current state. Note that the element can possibly be null which
means that the element is not found in the current state.]]>
</doc>
</method>
<method name="apply2Previous" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="previous" type="java.util.List"/>
<doc>
<![CDATA[Apply this diff to previous state in order to obtain current state.
@return the current state of the list.]]>
</doc>
</method>
<method name="apply2Current" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="current" type="java.util.List"/>
<doc>
<![CDATA[Apply the reverse of this diff to current state in order
to obtain the previous state.
@return the previous state of the list.]]>
</doc>
</method>
<method name="combinePosterior"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="posterior" type="org.apache.hadoop.hdfs.util.Diff"/>
<param name="deletedProcesser" type="org.apache.hadoop.hdfs.util.Diff.Processor"/>
<doc>
<![CDATA[Combine this diff with a posterior diff. We have the following cases:
<pre>
1. For (c, 0) in the posterior diff, check the element in this diff:
1.1 (c', 0) in this diff: impossible
1.2 (0, d') in this diff: put in c-list --> (c, d')
1.3 (c', d') in this diff: impossible
1.4 (0, 0) in this diff: put in c-list --> (c, 0)
This is the same logic as create(E).
2. For (0, d) in the posterior diff,
2.1 (c', 0) in this diff: remove from c-list --> (0, 0)
2.2 (0, d') in this diff: impossible
2.3 (c', d') in this diff: remove from c-list --> (0, d')
2.4 (0, 0) in this diff: put in d-list --> (0, d)
This is the same logic as delete(E).
3. For (c, d) in the posterior diff,
3.1 (c', 0) in this diff: replace the element in c-list --> (c, 0)
3.2 (0, d') in this diff: impossible
3.3 (c', d') in this diff: replace the element in c-list --> (c, d')
3.4 (0, 0) in this diff: put in c-list and d-list --> (c, d)
This is the same logic as modify(E, E).
</pre>
@param posterior The posterior diff to combine with.
@param deletedProcesser
process the deleted/overwritten elements in case 2.1, 2.3, 3.1 and 3.3.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[The difference between the current state and a previous state of a list.
Given a previous state of a set and a sequence of create, delete and modify
operations such that the current state of the set can be obtained by applying
the operations on the previous state, the following algorithm construct the
difference between the current state and the previous state of the set.
<pre>
Two lists are maintained in the algorithm:
- c-list for newly created elements
- d-list for the deleted elements
Denote the state of an element by the following
(0, 0): neither in c-list nor d-list
(c, 0): in c-list but not in d-list
(0, d): in d-list but not in c-list
(c, d): in both c-list and d-list
For each case below, ( , ) at the end shows the result state of the element.
Case 1. Suppose the element i is NOT in the previous state. (0, 0)
1.1. create i in current: add it to c-list (c, 0)
1.1.1. create i in current and then create: impossible
1.1.2. create i in current and then delete: remove it from c-list (0, 0)
1.1.3. create i in current and then modify: replace it in c-list (c', 0)
1.2. delete i from current: impossible
1.3. modify i in current: impossible
Case 2. Suppose the element i is ALREADY in the previous state. (0, 0)
2.1. create i in current: impossible
2.2. delete i from current: add it to d-list (0, d)
2.2.1. delete i from current and then create: add it to c-list (c, d)
2.2.2. delete i from current and then delete: impossible
2.2.3. delete i from current and then modify: impossible
2.3. modify i in current: put it in both c-list and d-list (c, d)
2.3.1. modify i in current and then create: impossible
2.3.2. modify i in current and then delete: remove it from c-list (0, d)
2.3.3. modify i in current and then modify: replace it in c-list (c', d)
</pre>
@param <K> The key type.
@param <E> The element type, which must implement {@link Element} interface.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.Diff -->
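<!-- Hand-written sketch of the c-list/d-list algorithm described above. Since the
     Diff constructors are protected, a trivial subclass is used; the Item element
     type and the sample data are hypothetical, and the printed result is the
     expected outcome under those assumptions.

     import java.util.Arrays;
     import java.util.List;
     import org.apache.hadoop.hdfs.util.Diff;

     public class DiffSketch {
       // Hypothetical element type, keyed and ordered by its name.
       static class Item implements Diff.Element<String> {
         final String name;
         Item(String name) { this.name = name; }
         @Override public String getKey() { return name; }
         @Override public int compareTo(String key) { return name.compareTo(key); }
         @Override public String toString() { return name; }
       }

       // Expose the protected Diff constructors through a small subclass.
       static class ItemDiff extends Diff<String, Item> {}

       public static void main(String[] args) {
         List<Item> previous = Arrays.asList(new Item("a"), new Item("b"));
         ItemDiff diff = new ItemDiff();
         diff.create(new Item("c"));     // record a newly created element: (c, 0)
         diff.delete(previous.get(0));   // record the deletion of "a": (0, d)
         // Apply the recorded changes to the previous state to obtain the current one.
         List<Item> current = diff.apply2Previous(previous);
         System.out.println(current);    // expected to print [b, c]
       }
     }
-->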
<!-- start class org.apache.hadoop.hdfs.util.Diff.Container -->
<class name="Diff.Container" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getElement" return="E"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the element.]]>
</doc>
</method>
<doc>
<![CDATA[Containing exactly one element.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.Diff.Container -->
<!-- start interface org.apache.hadoop.hdfs.util.Diff.Element -->
<interface name="Diff.Element" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.lang.Comparable"/>
<method name="getKey" return="K"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the key of this object.]]>
</doc>
</method>
<doc>
<![CDATA[An interface for the elements in a {@link Diff}.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.util.Diff.Element -->
<!-- start class org.apache.hadoop.hdfs.util.Diff.ListType -->
<class name="Diff.ListType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.util.Diff.ListType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.util.Diff.ListType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.util.Diff.ListType -->
<!-- start interface org.apache.hadoop.hdfs.util.Diff.Processor -->
<interface name="Diff.Processor" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="process"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="element" type="E"/>
<doc>
<![CDATA[Process the given element.]]>
</doc>
</method>
<doc>
<![CDATA[An interface for passing a method in order to process elements.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.util.Diff.Processor -->
<!-- start class org.apache.hadoop.hdfs.util.Diff.UndoInfo -->
<class name="Diff.UndoInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getTrashedElement" return="E"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Undo information for some operations such as delete(E)
and {@link Diff#modify(Element, Element)}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.Diff.UndoInfo -->
<!-- start class org.apache.hadoop.hdfs.util.EnumCounters -->
<class name="EnumCounters" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EnumCounters" type="java.lang.Class"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct counters for the given enum constants.
@param enumClass the enum class of the counters.]]>
</doc>
</constructor>
<constructor name="EnumCounters" type="java.lang.Class, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="get" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<doc>
<![CDATA[@return the value of counter e.]]>
</doc>
</method>
<method name="asArray" return="long[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the values of the counters as a copy of the internal array.]]>
</doc>
</method>
<method name="negation"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Negate all counters.]]>
</doc>
</method>
<method name="set"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<param name="value" type="long"/>
<doc>
<![CDATA[Set counter e to the given value.]]>
</doc>
</method>
<method name="set"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumCounters"/>
<doc>
<![CDATA[Set these counters to the values of the given counters.]]>
</doc>
</method>
<method name="reset"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Reset all counters to zero.]]>
</doc>
</method>
<method name="add"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<param name="value" type="long"/>
<doc>
<![CDATA[Add the given value to counter e.]]>
</doc>
</method>
<method name="add"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumCounters"/>
<doc>
<![CDATA[Add the values of the given counters to these counters.]]>
</doc>
</method>
<method name="subtract"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<param name="value" type="long"/>
<doc>
<![CDATA[Subtract the given value from counter e.]]>
</doc>
</method>
<method name="subtract"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumCounters"/>
<doc>
<![CDATA[Subtract the values of the given counters from these counters.]]>
</doc>
</method>
<method name="sum" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the sum of all counters.]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="reset"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="allLessOrEqual" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<method name="anyGreaterOrEqual" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="val" type="long"/>
</method>
<doc>
<![CDATA[Counters for an enum type.
For example, suppose there is an enum type
<pre>
enum Fruit { APPLE, ORANGE, GRAPE }
</pre>
An {@link EnumCounters} object can be created for counting the numbers of
APPLE, ORANGE and GRAPE.
@param <E> the enum type]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.EnumCounters -->
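<!-- Hand-written sketch based on the Fruit example in the class description above,
     using only the constructor and methods listed for EnumCounters.

     import org.apache.hadoop.hdfs.util.EnumCounters;

     public class CounterSketch {
       enum Fruit { APPLE, ORANGE, GRAPE }

       public static void main(String[] args) {
         EnumCounters<Fruit> counters = new EnumCounters<Fruit>(Fruit.class);
         counters.add(Fruit.APPLE, 3);
         counters.add(Fruit.GRAPE, 2);
         counters.set(Fruit.ORANGE, 5);
         System.out.println(counters);                      // per-constant values
         System.out.println("total = " + counters.sum());   // 10
         counters.subtract(Fruit.APPLE, 1);
         System.out.println("apples = " + counters.get(Fruit.APPLE));   // 2
       }
     }
-->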
<!-- start interface org.apache.hadoop.hdfs.util.EnumCounters.Factory -->
<interface name="EnumCounters.Factory" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="newInstance" return="C"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create a new counters instance.]]>
</doc>
</method>
<doc>
<![CDATA[A factory for creating counters.
@param <E> the enum type
@param <C> the counter type]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.util.EnumCounters.Factory -->
<!-- start class org.apache.hadoop.hdfs.util.EnumCounters.Map -->
<class name="EnumCounters.Map" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Map" type="org.apache.hadoop.hdfs.util.EnumCounters.Factory"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct a map.]]>
</doc>
</constructor>
<method name="getCounts" return="C"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="key" type="K"/>
<doc>
<![CDATA[@return the counters for the given key.]]>
</doc>
</method>
<method name="sum" return="C"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the sum of the values of all the counters.]]>
</doc>
</method>
<method name="sum" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<doc>
<![CDATA[@return the sum of the values of all the counters for e.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A key-value map which maps the keys to {@link EnumCounters}.
Note that a null key is supported.
@param <K> the key type
@param <E> the enum type
@param <C> the counter type]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.EnumCounters.Map -->
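<!-- Hand-written sketch combining EnumCounters.Factory and EnumCounters.Map as
     listed above. The Fruit enum and the per-user keys are hypothetical.

     import org.apache.hadoop.hdfs.util.EnumCounters;

     public class CounterMapSketch {
       enum Fruit { APPLE, ORANGE, GRAPE }

       public static void main(String[] args) {
         // A factory telling the map how to create fresh counters for a new key.
         EnumCounters.Factory<Fruit, EnumCounters<Fruit>> factory =
             new EnumCounters.Factory<Fruit, EnumCounters<Fruit>>() {
               @Override
               public EnumCounters<Fruit> newInstance() {
                 return new EnumCounters<Fruit>(Fruit.class);
               }
             };
         EnumCounters.Map<String, Fruit, EnumCounters<Fruit>> perUser =
             new EnumCounters.Map<String, Fruit, EnumCounters<Fruit>>(factory);
         perUser.getCounts("alice").add(Fruit.APPLE, 2);
         perUser.getCounts("bob").add(Fruit.GRAPE, 4);
         System.out.println("all apples = " + perUser.sum(Fruit.APPLE));   // 2
         System.out.println(perUser);
       }
     }
-->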
<!-- start class org.apache.hadoop.hdfs.util.EnumDoubles -->
<class name="EnumDoubles" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="EnumDoubles" type="java.lang.Class"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct doubles for the given enum constants.
@param enumClass the enum class.]]>
</doc>
</constructor>
<method name="get" return="double"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<doc>
<![CDATA[@return the value corresponding to e.]]>
</doc>
</method>
<method name="negation"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Negate all values.]]>
</doc>
</method>
<method name="set"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<param name="value" type="double"/>
<doc>
<![CDATA[Set e to the given value.]]>
</doc>
</method>
<method name="set"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumDoubles"/>
<doc>
<![CDATA[Set the values of this object to those of that object.]]>
</doc>
</method>
<method name="reset"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Reset all values to zero.]]>
</doc>
</method>
<method name="add"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<param name="value" type="double"/>
<doc>
<![CDATA[Add the given value to e.]]>
</doc>
</method>
<method name="add"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumDoubles"/>
<doc>
<![CDATA[Add the values of that object to this.]]>
</doc>
</method>
<method name="subtract"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="e" type="E"/>
<param name="value" type="double"/>
<doc>
<![CDATA[Subtract the given value from e.]]>
</doc>
</method>
<method name="subtract"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.util.EnumDoubles"/>
<doc>
<![CDATA[Subtract the values of that object from this object.]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Similar to {@link EnumCounters} except that the value type is double.
@param <E> the enum type]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.EnumDoubles -->
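<!-- Hand-written sketch for EnumDoubles, the double-valued counterpart of
     EnumCounters described above. The Axis enum and values are hypothetical.

     import org.apache.hadoop.hdfs.util.EnumDoubles;

     public class DoublesSketch {
       enum Axis { X, Y, Z }

       public static void main(String[] args) {
         EnumDoubles<Axis> values = new EnumDoubles<Axis>(Axis.class);
         values.set(Axis.X, 1.5);
         values.add(Axis.Y, 0.25);
         values.negation();                     // flip the sign of every value
         System.out.println(values);
         System.out.println("x = " + values.get(Axis.X));   // prints -1.5
       }
     }
-->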
<!-- start class org.apache.hadoop.hdfs.util.Holder -->
<class name="Holder" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Holder" type="T"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="held" type="T"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A Holder is simply a wrapper around some other object. This is useful
in particular for storing immutable values like boxed Integers in a
collection without having to do the &quot;lookup&quot; of the value twice.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.Holder -->
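<!-- Hand-written sketch for Holder, using only the constructor, field and
     toString listed above; the wrapped value is illustrative.

     import org.apache.hadoop.hdfs.util.Holder;

     public class HolderSketch {
       public static void main(String[] args) {
         // Wrap a boxed value once and reuse the same wrapper in a collection.
         Holder<Integer> txId = new Holder<Integer>(42);
         System.out.println("held = " + txId.held);   // the wrapped value is a public field
         System.out.println(txId);                    // prints the held value
       }
     }
-->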
<!-- start class org.apache.hadoop.hdfs.util.LightWeightHashSet -->
<class name="LightWeightHashSet" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.util.Collection"/>
<constructor name="LightWeightHashSet" type="int, float, float"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@param initCapacity
Recommended size of the internal array.
@param maxLoadFactor
used to determine when to expand the internal array
@param minLoadFactor
used to determine when to shrink the internal array]]>
</doc>
</constructor>
<constructor name="LightWeightHashSet"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="LightWeightHashSet" type="int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isEmpty" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Check if the set is empty.
@return true if the set is empty, false otherwise]]>
</doc>
</method>
<method name="getCapacity" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the current capacity (for testing).]]>
</doc>
</method>
<method name="size" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the number of stored elements.]]>
</doc>
</method>
<method name="getIndex" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="hashCode" type="int"/>
<doc>
<![CDATA[Get index in the internal table for a given hash.]]>
</doc>
</method>
<method name="contains" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="key" type="java.lang.Object"/>
<doc>
<![CDATA[Check if the set contains the given element.
@return true if element present, false otherwise.]]>
</doc>
</method>
<method name="getElement" return="T"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="key" type="T"/>
<doc>
<![CDATA[Return the element in this set which is equal to
the given key, if such an element exists.
Otherwise returns null.]]>
</doc>
</method>
<method name="getContainedElem" return="T"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="index" type="int"/>
<param name="key" type="T"/>
<param name="hashCode" type="int"/>
<doc>
<![CDATA[Check if the set contains given element at given index. If it
does, return that element.
@return the element, or null, if no element matches]]>
</doc>
</method>
<method name="addAll" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="toAdd" type="java.util.Collection"/>
<doc>
<![CDATA[Add all elements in the collection. Expand if necessary.
@param toAdd - elements to add.
@return true if the set has changed, false otherwise]]>
</doc>
</method>
<method name="add" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="element" type="T"/>
<doc>
<![CDATA[Add given element to the hash table. Expand table if necessary.
@return true if the element was not present in the table, false otherwise]]>
</doc>
</method>
<method name="addElem" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="element" type="T"/>
<doc>
<![CDATA[Add the given element to the hash table.
@return true if the element was not present in the table, false otherwise]]>
</doc>
</method>
<method name="remove" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="key" type="java.lang.Object"/>
<doc>
<![CDATA[Remove the element corresponding to the key.
@return If such element exists, return true. Otherwise, return false.]]>
</doc>
</method>
<method name="removeElem" return="org.apache.hadoop.hdfs.util.LightWeightHashSet.LinkedElement"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="key" type="T"/>
<doc>
<![CDATA[Remove the element corresponding to the key, given key.hashCode() == index.
@return the removed entry if such an element exists; otherwise, null.]]>
</doc>
</method>
<method name="pollN" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="n" type="int"/>
<doc>
<![CDATA[Remove and return n elements from the hashtable.
The order in which entries are removed is unspecified,
and may not correspond to the order in which they were inserted.
@return the removed elements]]>
</doc>
</method>
<method name="pollAll" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Remove all elements from the set and return them. Clear the entries.]]>
</doc>
</method>
<method name="pollToArray" return="T[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="array" type="T[]"/>
<doc>
<![CDATA[Get array.length elements from the set, and put them into the array.]]>
</doc>
</method>
<method name="shrinkIfNecessary"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[Checks if we need to shrink, and shrinks if necessary.]]>
</doc>
</method>
<method name="expandIfNecessary"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[Checks if we need to expand, and expands if necessary.]]>
</doc>
</method>
<method name="iterator" return="java.util.Iterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="printDetails"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.PrintStream"/>
<doc>
<![CDATA[Print detailed information of this object.]]>
</doc>
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Clear the set. Resize it to the original capacity.]]>
</doc>
</method>
<method name="toArray" return="java.lang.Object[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toArray" return="U[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="U[]"/>
</method>
<method name="containsAll" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="java.util.Collection"/>
</method>
<method name="removeAll" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="java.util.Collection"/>
</method>
<method name="retainAll" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="c" type="java.util.Collection"/>
</method>
<field name="DEFAULT_MAX_LOAD_FACTOR" type="float"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<field name="DEFAUT_MIN_LOAD_FACTOR" type="float"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<field name="MINIMUM_CAPACITY" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<field name="entries" type="org.apache.hadoop.hdfs.util.LightWeightHashSet.LinkedElement[]"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[An internal array of entries, which are the rows of the hash table. The
size must be a power of two.]]>
</doc>
</field>
<field name="size" type="int"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[The size of the set (not the entry array).]]>
</doc>
</field>
<field name="modification" type="int"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[Modification version for fail-fast.
@see ConcurrentModificationException]]>
</doc>
</field>
<doc>
<![CDATA[A low-memory hash set implementation, which uses an array for storing
the elements and linked lists for collision resolution. This class does not
support null elements.
This class is not thread safe.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.LightWeightHashSet -->
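  <!-- Usage sketch (hypothetical): LightWeightHashSet behaves like a java.util.Collection
       with extra poll operations that remove and return elements in one step.

         import java.util.List;
         import org.apache.hadoop.hdfs.util.LightWeightHashSet;

         LightWeightHashSet<Long> blockIds = new LightWeightHashSet<Long>();
         blockIds.add(1001L);
         blockIds.add(1002L);
         boolean present = blockIds.contains(1001L);   // true
         List<Long> drained = blockIds.pollN(1);       // removes and returns one element
         List<Long> rest = blockIds.pollAll();         // drains the remainder
  -->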
<!-- start class org.apache.hadoop.hdfs.util.LightWeightLinkedSet -->
<class name="LightWeightLinkedSet" extends="org.apache.hadoop.hdfs.util.LightWeightHashSet"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="LightWeightLinkedSet" type="int, float, float"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@param initCapacity
Recommended size of the internal array.
@param maxLoadFactor
used to determine when to expand the internal array
@param minLoadFactor
used to determine when to shrink the internal array]]>
</doc>
</constructor>
<constructor name="LightWeightLinkedSet"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="addElem" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="element" type="T"/>
<doc>
<![CDATA[Add the given element to the hash table.
@return true if the element was not present in the table, false otherwise]]>
</doc>
</method>
<method name="removeElem" return="org.apache.hadoop.hdfs.util.LightWeightLinkedSet.DoubleLinkedElement"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="key" type="T"/>
<doc>
<![CDATA[Remove the element corresponding to the key, given key.hashCode() == index.
@return Return the entry with the element if exists. Otherwise return null.]]>
</doc>
</method>
<method name="pollFirst" return="T"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Remove and return the first element on the linked list of all elements.
@return first element]]>
</doc>
</method>
<method name="pollN" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="n" type="int"/>
<doc>
<![CDATA[Remove and return n elements from the hashtable.
The order in which entries are removed corresponds
to the order in which they were inserted.
@return the removed elements, in insertion order]]>
</doc>
</method>
<method name="pollAll" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Remove all elements from the set and return them in order. Traverse the
linked list rather than the hashtable; this is a faster version of the parent
method.]]>
</doc>
</method>
<method name="toArray" return="U[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="a" type="U[]"/>
</method>
<method name="iterator" return="java.util.Iterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="clear"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Clear the set. Resize it to the original capacity.]]>
</doc>
</method>
<doc>
<![CDATA[A low-memory linked hash set implementation, which uses an array for storing
the elements and linked lists for collision resolution. In addition, it stores
elements in a linked list to ensure ordered traversal. This class does not
support null elements.
This class is not thread safe.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.LightWeightLinkedSet -->
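  <!-- Usage sketch (hypothetical): unlike the parent class, the linked variant preserves
       insertion order, so pollFirst() and pollN(n) drain elements oldest-first.

         import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;

         LightWeightLinkedSet<String> pending = new LightWeightLinkedSet<String>();
         pending.add("a");
         pending.add("b");
         String first = pending.pollFirst();   // "a", the oldest element
  -->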
<!-- start class org.apache.hadoop.hdfs.util.LongBitFormat -->
<class name="LongBitFormat" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.io.Serializable"/>
<constructor name="LongBitFormat" type="java.lang.String, org.apache.hadoop.hdfs.util.LongBitFormat, int, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="retrieve" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="record" type="long"/>
<doc>
<![CDATA[Retrieve the value from the record.]]>
</doc>
</method>
<method name="combine" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="value" type="long"/>
<param name="record" type="long"/>
<doc>
<![CDATA[Combine the value into the record.]]>
</doc>
</method>
<method name="getMin" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Bit format in a long.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.LongBitFormat -->
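  <!-- Worked sketch (hypothetical field layout): LongBitFormat packs several fixed-width
       fields into one long. Each format is chained to the previous one, so its bits start
       where the previous field ends; combine() writes a field and retrieve() reads it back.

         import org.apache.hadoop.hdfs.util.LongBitFormat;

         // 16 low bits for MODE, the next 24 bits for GROUP (names are illustrative only)
         LongBitFormat MODE  = new LongBitFormat("MODE",  null, 16, 0);
         LongBitFormat GROUP = new LongBitFormat("GROUP", MODE, 24, 0);

         long record = 0L;
         record = MODE.combine(0755, record);    // store the mode bits
         record = GROUP.combine(123, record);    // store the group id above them
         long mode  = MODE.retrieve(record);     // 0755 (493 decimal)
         long group = GROUP.retrieve(record);    // 123
  -->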
<!-- start class org.apache.hadoop.hdfs.util.MD5FileUtils -->
<class name="MD5FileUtils" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="MD5FileUtils"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="verifySavedMD5"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dataFile" type="java.io.File"/>
<param name="expectedMD5" type="org.apache.hadoop.io.MD5Hash"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Verify that the previously saved md5 for the given file matches
expectedMd5.
@throws IOException]]>
</doc>
</method>
<method name="readStoredMd5ForFile" return="org.apache.hadoop.io.MD5Hash"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dataFile" type="java.io.File"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read the md5 checksum stored alongside the given data file.
@param dataFile the file containing data
@return the checksum stored in dataFile.md5]]>
</doc>
</method>
<method name="computeMd5ForFile" return="org.apache.hadoop.io.MD5Hash"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dataFile" type="java.io.File"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read dataFile and compute its MD5 checksum.]]>
</doc>
</method>
<method name="saveMD5File"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dataFile" type="java.io.File"/>
<param name="digest" type="org.apache.hadoop.io.MD5Hash"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save the ".md5" file that lists the md5sum of another file.
@param dataFile the original file whose md5 was computed
@param digest the computed digest
@throws IOException]]>
</doc>
</method>
<method name="renameMD5File"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="oldDataFile" type="java.io.File"/>
<param name="newDataFile" type="java.io.File"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getDigestFileForFile" return="java.io.File"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="java.io.File"/>
<doc>
<![CDATA[@return a reference to the file with .md5 suffix that will
contain the md5 checksum for the given data file.]]>
</doc>
</method>
<field name="MD5_SUFFIX" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Static functions for dealing with files of the same format
that the Unix "md5sum" utility writes.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.MD5FileUtils -->
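  <!-- Usage sketch (hypothetical file): computing, saving, and verifying the ".md5" side
       file for a data file.

         import java.io.File;
         import java.io.IOException;
         import org.apache.hadoop.hdfs.util.MD5FileUtils;
         import org.apache.hadoop.io.MD5Hash;

         void checkpoint(File dataFile) throws IOException {
           MD5Hash digest = MD5FileUtils.computeMd5ForFile(dataFile);
           MD5FileUtils.saveMD5File(dataFile, digest);       // writes the <dataFile>.md5 side file
           MD5FileUtils.verifySavedMD5(dataFile, digest);    // throws IOException on mismatch
           File md5File = MD5FileUtils.getDigestFileForFile(dataFile);
         }
  -->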
<!-- start class org.apache.hadoop.hdfs.util.ReadOnlyList.Util -->
<class name="ReadOnlyList.Util" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Util"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="emptyList" return="org.apache.hadoop.hdfs.util.ReadOnlyList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return an empty list.]]>
</doc>
</method>
<method name="binarySearch" return="int"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="list" type="org.apache.hadoop.hdfs.util.ReadOnlyList"/>
<param name="key" type="K"/>
<doc>
<![CDATA[The same as {@link Collections#binarySearch(List, Object)}
except that the list is a {@link ReadOnlyList}.
@return the insertion point defined
in {@link Collections#binarySearch(List, Object)}.]]>
</doc>
</method>
<method name="asReadOnlyList" return="org.apache.hadoop.hdfs.util.ReadOnlyList"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="list" type="java.util.List"/>
<doc>
<![CDATA[@return a {@link ReadOnlyList} view of the given list.]]>
</doc>
</method>
<method name="asList" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="list" type="org.apache.hadoop.hdfs.util.ReadOnlyList"/>
<doc>
<![CDATA[@return a {@link List} view of the given list.]]>
</doc>
</method>
<doc>
<![CDATA[Utilities for {@link ReadOnlyList}]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.ReadOnlyList.Util -->
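  <!-- Usage sketch (hypothetical data): wrapping a sorted java.util.List as a ReadOnlyList
       and searching it, assuming the element type is Comparable.

         import java.util.Arrays;
         import java.util.List;
         import org.apache.hadoop.hdfs.util.ReadOnlyList;

         List<Integer> sorted = Arrays.asList(1, 3, 5, 7);
         ReadOnlyList<Integer> ro = ReadOnlyList.Util.asReadOnlyList(sorted);
         int pos = ReadOnlyList.Util.binarySearch(ro, 5);    // index of 5, or a negative insertion point
         List<Integer> view = ReadOnlyList.Util.asList(ro);  // read-only List view of the same elements
  -->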
<!-- start interface org.apache.hadoop.hdfs.util.ReferenceCountMap.ReferenceCounter -->
<interface name="ReferenceCountMap.ReferenceCounter" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getRefCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incrementAndGetRefCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="decrementAndGetRefCount" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Interface for the reference count holder]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.util.ReferenceCountMap.ReferenceCounter -->
<!-- start interface org.apache.hadoop.hdfs.util.RwLock -->
<interface name="RwLock" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="readLock"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Acquire read lock.]]>
</doc>
</method>
<method name="readUnlock"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Release read lock.]]>
</doc>
</method>
<method name="hasReadLock" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Check if the current thread holds read lock.]]>
</doc>
</method>
<method name="writeLock"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Acquire write lock.]]>
</doc>
</method>
<method name="writeLockInterruptibly"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<doc>
<![CDATA[Acquire write lock, unless interrupted while waiting]]>
</doc>
</method>
<method name="writeUnlock"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Release write lock.]]>
</doc>
</method>
<method name="hasWriteLock" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Check if the current thread holds write lock.]]>
</doc>
</method>
<doc>
<![CDATA[Read-write lock interface.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.util.RwLock -->
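  <!-- Usage sketch (hypothetical caller): the usual pattern for an RwLock implementation is
       to pair each acquire with a release in a finally block.

         import org.apache.hadoop.hdfs.util.RwLock;

         void readOnlyOperation(RwLock lock) {
           lock.readLock();
           try {
             assert lock.hasReadLock();
             // perform the read-only work here
           } finally {
             lock.readUnlock();
           }
         }
  -->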
<!-- start class org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException -->
<class name="XMLUtils.InvalidXmlException" extends="java.lang.RuntimeException"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="InvalidXmlException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Exception that reflects an invalid XML document.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException -->
<!-- start class org.apache.hadoop.hdfs.util.XMLUtils.Stanza -->
<class name="XMLUtils.Stanza" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Stanza"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setValue"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="value" type="java.lang.String"/>
</method>
<method name="getValue" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="hasChildren" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
<doc>
<![CDATA[Discover if a stanza has a given entry.
@param name entry to look for
@return true if the entry was found]]>
</doc>
</method>
<method name="getChildren" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
<exception name="XMLUtils.InvalidXmlException" type="org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException"/>
<doc>
<![CDATA[Pull the entries with a given name from a stanza.
@param name entry name to look for
@return the list of matching entries]]>
</doc>
</method>
<method name="getValue" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
<exception name="XMLUtils.InvalidXmlException" type="org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException"/>
<doc>
<![CDATA[Pull a string entry from a stanza.
@param name entry to look for
@return the entry]]>
</doc>
</method>
<method name="getValueOrNull" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
<exception name="XMLUtils.InvalidXmlException" type="org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException"/>
<doc>
<![CDATA[Pull a string entry from a stanza, or null.
@param name entry to look for
@return the entry, or null if it was not found.]]>
</doc>
</method>
<method name="addChild"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
<param name="child" type="org.apache.hadoop.hdfs.util.XMLUtils.Stanza"/>
<doc>
<![CDATA[Add an entry to a stanza.
@param name name of the entry to add
@param child the entry to add]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Convert a stanza to a human-readable string.]]>
</doc>
</method>
<doc>
<![CDATA[Represents a bag of key-value pairs encountered while parsing an XML
file.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.XMLUtils.Stanza -->
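  <!-- Usage sketch (hypothetical entry names): building a stanza and pulling values back
       out of it; getValue(name) throws the unchecked InvalidXmlException when the entry is
       missing, while getValueOrNull(name) returns null.

         import org.apache.hadoop.hdfs.util.XMLUtils;

         XMLUtils.Stanza record = new XMLUtils.Stanza();
         XMLUtils.Stanza path = new XMLUtils.Stanza();
         path.setValue("/user/alice");
         record.addChild("PATH", path);

         if (record.hasChildren("PATH")) {
           String p = record.getValue("PATH");           // "/user/alice"
         }
         String block = record.getValueOrNull("BLOCK");  // null, nothing was added under BLOCK
  -->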
<!-- start class org.apache.hadoop.hdfs.util.XMLUtils.UnmanglingError -->
<class name="XMLUtils.UnmanglingError" extends="java.lang.RuntimeException"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnmanglingError" type="java.lang.String, java.lang.Exception"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="UnmanglingError" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Exception that reflects a string that cannot be unmangled.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.util.XMLUtils.UnmanglingError -->
</package>
<package name="org.apache.hadoop.hdfs.web">
<!-- start class org.apache.hadoop.hdfs.web.AuthFilter -->
<class name="AuthFilter" extends="org.apache.hadoop.security.authentication.server.AuthenticationFilter"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="AuthFilter"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getConfiguration" return="java.util.Properties"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="prefix" type="java.lang.String"/>
<param name="config" type="javax.servlet.FilterConfig"/>
<exception name="ServletException" type="javax.servlet.ServletException"/>
<doc>
<![CDATA[Returns the filter configuration properties,
including the ones prefixed with {@link #CONF_PREFIX}.
The prefix is removed from the returned property names.
@param prefix parameter not used.
@param config parameter contains the initialization values.
@return Hadoop-Auth configuration properties.
@throws ServletException]]>
</doc>
</method>
<method name="doFilter"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="request" type="javax.servlet.ServletRequest"/>
<param name="response" type="javax.servlet.ServletResponse"/>
<param name="filterChain" type="javax.servlet.FilterChain"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="ServletException" type="javax.servlet.ServletException"/>
</method>
<doc>
<![CDATA[Subclass of {@link AuthenticationFilter} that
obtains Hadoop-Auth configuration for webhdfs.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.AuthFilter -->
<!-- start class org.apache.hadoop.hdfs.web.ByteRangeInputStream -->
<class name="ByteRangeInputStream" extends="org.apache.hadoop.fs.FSInputStream"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ByteRangeInputStream" type="org.apache.hadoop.hdfs.web.ByteRangeInputStream.URLOpener, org.apache.hadoop.hdfs.web.ByteRangeInputStream.URLOpener"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create with the specified URLOpeners. Original url is used to open the
stream for the first time. Resolved url is used in subsequent requests.
@param o Original url
@param r Resolved url]]>
</doc>
</constructor>
<method name="getResolvedUrl" return="java.net.URL"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="connection" type="java.net.HttpURLConnection"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getInputStream" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="openInputStream" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="byte[]"/>
<param name="off" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="seek"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pos" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Seek to the given offset from the start of the file.
The next read() will be from that location. Can't
seek past the end of the file.]]>
</doc>
</method>
<method name="getPos" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return the current offset from the start of the file]]>
</doc>
</method>
<method name="seekToNewSource" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="targetPos" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Seeks a different copy of the data. Returns true if
a new source is found, false otherwise.]]>
</doc>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="in" type="java.io.InputStream"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="originalURL" type="org.apache.hadoop.hdfs.web.ByteRangeInputStream.URLOpener"
transient="false" volatile="false"
static="false" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<field name="resolvedURL" type="org.apache.hadoop.hdfs.web.ByteRangeInputStream.URLOpener"
transient="false" volatile="false"
static="false" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<field name="startPos" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="currentPos" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="fileLength" type="java.lang.Long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[To support HTTP byte streams, a new connection to an HTTP server needs to be
created each time. This class hides the complexity of those multiple
connections from the client. Whenever seek() is called, a new connection
is made on the subsequent read(). The normal input stream functions are
connected to the currently active input stream.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.ByteRangeInputStream -->
<!-- start class org.apache.hadoop.hdfs.web.ByteRangeInputStream.URLOpener -->
<class name="ByteRangeInputStream.URLOpener" extends="java.lang.Object"
abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="URLOpener" type="java.net.URL"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setURL"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="u" type="java.net.URL"/>
</method>
<method name="getURL" return="java.net.URL"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="connect" return="java.net.HttpURLConnection"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="offset" type="long"/>
<param name="resolved" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Connect to server with a data offset.]]>
</doc>
</method>
<field name="url" type="java.net.URL"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[This class wraps a URL and provides a method to open a connection.
It can be overridden to change how a connection is opened.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.ByteRangeInputStream.URLOpener -->
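  <!-- Usage sketch (hypothetical helper): with a concrete ByteRangeInputStream (for example
       the one backing a WebHDFS read), seek() is cheap and the ranged HTTP connection is
       only opened by the following read().

         import java.io.IOException;
         import org.apache.hadoop.hdfs.web.ByteRangeInputStream;

         int readAt(ByteRangeInputStream in, long offset, byte[] buf) throws IOException {
           in.seek(offset);                      // no connection is opened yet
           int n = in.read(buf, 0, buf.length);  // opens a new connection at the offset
           long pos = in.getPos();               // current offset after the read
           return n;
         }
  -->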
<!-- start class org.apache.hadoop.hdfs.web.JsonUtil -->
<class name="JsonUtil" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="JsonUtil"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Convert a token object to a Json string.]]>
</doc>
</method>
<method name="toToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="m" type="java.util.Map"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Convert a Json map to a Token.]]>
</doc>
</method>
<method name="toDelegationToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Convert a Json map to a Token of DelegationTokenIdentifier.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="e" type="java.lang.Exception"/>
<doc>
<![CDATA[Convert an exception object to a Json string.]]>
</doc>
</method>
<method name="toRemoteException" return="org.apache.hadoop.ipc.RemoteException"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<doc>
<![CDATA[Convert a Json map to a RemoteException.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="key" type="java.lang.String"/>
<param name="value" type="java.lang.Object"/>
<doc>
<![CDATA[Convert a key-value pair to a Json string.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="status" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
<param name="includeType" type="boolean"/>
<doc>
<![CDATA[Convert a HdfsFileStatus object to a Json string.]]>
</doc>
</method>
<method name="toFileStatus" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<param name="includesType" type="boolean"/>
<doc>
<![CDATA[Convert a Json map to a HdfsFileStatus object.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="locatedblocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlocks"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Convert LocatedBlocks to a Json string.]]>
</doc>
</method>
<method name="toLocatedBlocks" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Convert a Json map to LocatedBlocks.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="contentsummary" type="org.apache.hadoop.fs.ContentSummary"/>
<doc>
<![CDATA[Convert a ContentSummary to a Json string.]]>
</doc>
</method>
<method name="toContentSummary" return="org.apache.hadoop.fs.ContentSummary"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<doc>
<![CDATA[Convert a Json map to a ContentSummary.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="checksum" type="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"/>
<doc>
<![CDATA[Convert a MD5MD5CRC32FileChecksum to a Json string.]]>
</doc>
</method>
<method name="toMD5MD5CRC32FileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Convert a Json map to a MD5MD5CRC32FileChecksum.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="status" type="org.apache.hadoop.fs.permission.AclStatus"/>
<doc>
<![CDATA[Convert an AclStatus object to a Json string.]]>
</doc>
</method>
<method name="toAclStatus" return="org.apache.hadoop.fs.permission.AclStatus"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<doc>
<![CDATA[Convert a Json map to an AclStatus object.]]>
</doc>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrs" type="java.util.List"/>
<param name="encoding" type="org.apache.hadoop.fs.XAttrCodec"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="toJsonString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrs" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getXAttr" return="byte[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<param name="name" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="toXAttrs" return="java.util.Map"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="toXAttrNames" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="json" type="java.util.Map"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[JSON Utilities]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.JsonUtil -->
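  <!-- Usage sketch (hypothetical values): a couple of the static converters in action.

         import java.io.IOException;
         import org.apache.hadoop.hdfs.web.JsonUtil;

         String flag = JsonUtil.toJsonString("boolean", Boolean.TRUE);      // a one-entry JSON object
         String err  = JsonUtil.toJsonString(new IOException("not found")); // JSON form of the wrapped exception
  -->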
<!-- start class org.apache.hadoop.hdfs.web.KerberosUgiAuthenticator -->
<class name="KerberosUgiAuthenticator" extends="org.apache.hadoop.security.authentication.client.KerberosAuthenticator"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="KerberosUgiAuthenticator"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getFallBackAuthenticator" return="org.apache.hadoop.security.authentication.client.Authenticator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Use UserGroupInformation as a fallback authenticator
if the server does not use Kerberos SPNEGO HTTP authentication.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.KerberosUgiAuthenticator -->
<!-- start class org.apache.hadoop.hdfs.web.ParamFilter -->
<class name="ParamFilter" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="com.sun.jersey.spi.container.ResourceFilter"/>
<constructor name="ParamFilter"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getRequestFilter" return="com.sun.jersey.spi.container.ContainerRequestFilter"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getResponseFilter" return="com.sun.jersey.spi.container.ContainerResponseFilter"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[A filter to change parameter names to lower case
so that parameter names are treated as case-insensitive.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.ParamFilter -->
<!-- start class org.apache.hadoop.hdfs.web.SWebHdfsFileSystem -->
<class name="SWebHdfsFileSystem" extends="org.apache.hadoop.hdfs.web.WebHdfsFileSystem"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SWebHdfsFileSystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getScheme" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTransportScheme" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</method>
<method name="getTokenKind" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</method>
<method name="getDefaultPort" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="TOKEN_KIND" type="org.apache.hadoop.io.Text"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="SCHEME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.SWebHdfsFileSystem -->
<!-- start class org.apache.hadoop.hdfs.web.WebHdfsFileSystem -->
<class name="WebHdfsFileSystem" extends="org.apache.hadoop.fs.FileSystem"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.DelegationTokenRenewer.Renewable"/>
<implements name="org.apache.hadoop.hdfs.web.TokenAspect.TokenManagementDelegator"/>
<constructor name="WebHdfsFileSystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getScheme" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return the protocol scheme for the FileSystem.
<p/>
@return <code>webhdfs</code>]]>
</doc>
</method>
<method name="getTransportScheme" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[Return the underlying transport protocol (http / https).]]>
</doc>
</method>
<method name="getTokenKind" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</method>
<method name="initialize"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getCanonicalUri" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isEnabled" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="log" type="org.apache.commons.logging.Log"/>
<doc>
<![CDATA[Is WebHDFS enabled in conf?]]>
</doc>
</method>
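  <!-- Usage sketch (hypothetical NameNode address): opening a path through the webhdfs
       scheme after checking that WebHDFS is enabled in the configuration.

         import java.io.IOException;
         import java.net.URI;
         import org.apache.commons.logging.LogFactory;
         import org.apache.hadoop.conf.Configuration;
         import org.apache.hadoop.fs.FileStatus;
         import org.apache.hadoop.fs.FileSystem;
         import org.apache.hadoop.fs.Path;
         import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

         void probe(Configuration conf) throws IOException {
           if (WebHdfsFileSystem.isEnabled(conf, LogFactory.getLog("webhdfs"))) {
             FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070/"), conf);
             FileStatus st = fs.getFileStatus(new Path("/user"));
             fs.close();
           }
         }
  -->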
<method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getDefaultPort" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getUri" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="canonicalizeUri" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
</method>
<method name="getHomeDirectoryString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<doc>
<![CDATA[@return the home directory.]]>
</doc>
</method>
<method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setWorkingDirectory"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="org.apache.hadoop.fs.Path"/>
</method>
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getAclStatus" return="org.apache.hadoop.fs.permission.AclStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="mkdirs" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="createSymlink"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="destination" type="org.apache.hadoop.fs.Path"/>
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="createParent" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a symlink pointing to the destination path.
@see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean)]]>
</doc>
</method>
<method name="rename" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="dst" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="rename"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="dst" type="org.apache.hadoop.fs.Path"/>
<param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setXAttr"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="name" type="java.lang.String"/>
<param name="value" type="byte[]"/>
<param name="flag" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getXAttr" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="name" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getXAttrs" return="java.util.Map"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getXAttrs" return="java.util.Map"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="names" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="listXAttrs" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="removeXAttr"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="name" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setOwner"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="owner" type="java.lang.String"/>
<param name="group" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="modifyAclEntries"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="aclSpec" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="removeAclEntries"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="aclSpec" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="removeDefaultAcl"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="removeAcl"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setAcl"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="aclSpec" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="createSnapshot" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="snapshotName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="deleteSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="snapshotName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="renameSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="snapshotOldName" type="java.lang.String"/>
<param name="snapshotNewName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setReplication" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="replication" type="short"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setTimes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="mtime" type="long"/>
<param name="atime" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getDefaultBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDefaultReplication" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="concat"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="trg" type="org.apache.hadoop.fs.Path"/>
<param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<param name="overwrite" type="boolean"/>
<param name="bufferSize" type="int"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="bufferSize" type="int"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="truncate" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="newLength" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="recursive" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="buffersize" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="renewer" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getRenewToken" return="org.apache.hadoop.security.token.Token"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setDelegationToken"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
</method>
<method name="renewDelegationToken" return="long"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="cancelDelegationToken"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="status" type="org.apache.hadoop.fs.FileStatus"/>
<param name="offset" type="long"/>
<param name="length" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="offset" type="long"/>
<param name="length" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="access"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="mode" type="org.apache.hadoop.fs.permission.FsAction"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getCanonicalServiceName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="SCHEME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[File System URI: {SCHEME}://namenode:port/path/to/file]]>
</doc>
</field>
<field name="VERSION" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[WebHdfs version.]]>
</doc>
</field>
<field name="PATH_PREFIX" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file]]>
</doc>
</field>
<field name="connectionFactory" type="org.apache.hadoop.hdfs.web.URLConnectionFactory"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
      <![CDATA[Default connection factory may be overridden in tests to use smaller timeout values.]]>
</doc>
</field>
<field name="TOKEN_KIND" type="org.apache.hadoop.io.Text"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[Delegation token kind.]]>
</doc>
</field>
<field name="CANT_FALLBACK_TO_INSECURE_MSG" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="tokenServiceName" type="org.apache.hadoop.io.Text"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A FileSystem for HDFS over the web.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.WebHdfsFileSystem -->
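  <!-- Usage sketch (illustrative; the host, port, path, and xattr name below are hypothetical
       placeholders): the WebHdfsFileSystem class above is normally obtained through the generic
       FileSystem factory with a webhdfs URI, after which the xattr methods listed above can be
       called like any other FileSystem operation.

       import java.net.URI;
       import java.util.EnumSet;
       import org.apache.hadoop.conf.Configuration;
       import org.apache.hadoop.fs.FileSystem;
       import org.apache.hadoop.fs.Path;
       import org.apache.hadoop.fs.XAttrSetFlag;

       public class WebHdfsXAttrSketch {
         public static void main(String[] args) throws Exception {
           Configuration conf = new Configuration();
           // 50070 is the usual namenode HTTP port in 2.x clusters; adjust as needed.
           FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070/"), conf);
           Path file = new Path("/tmp/example.txt");
           // setXAttr/getXAttr correspond to the methods listed in the class above.
           fs.setXAttr(file, "user.origin", "demo".getBytes("UTF-8"),
               EnumSet.of(XAttrSetFlag.CREATE));
           byte[] value = fs.getXAttr(file, "user.origin");
           System.out.println(new String(value, "UTF-8"));
           fs.close();
         }
       }
  -->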
</package>
<package name="org.apache.hadoop.hdfs.web.resources">
<!-- start class org.apache.hadoop.hdfs.web.resources.AccessTimeParam -->
<class name="AccessTimeParam" extends="org.apache.hadoop.hdfs.web.resources.LongParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="AccessTimeParam" type="java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="AccessTimeParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Access time parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.AccessTimeParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.AclPermissionParam -->
<class name="AclPermissionParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="AclPermissionParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<constructor name="AclPermissionParam" type="java.util.List"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAclPermission" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="includePermission" type="boolean"/>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[AclPermission parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.AclPermissionParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.BlockSizeParam -->
<class name="BlockSizeParam" extends="org.apache.hadoop.hdfs.web.resources.LongParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockSizeParam" type="java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="BlockSizeParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getValue" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
      <![CDATA[@return the value or, if it is null, the default from conf.]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Block size parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.BlockSizeParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.BufferSizeParam -->
<class name="BufferSizeParam" extends="org.apache.hadoop.hdfs.web.resources.IntegerParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BufferSizeParam" type="java.lang.Integer"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="BufferSizeParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getValue" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
      <![CDATA[@return the value or, if it is null, the default from conf.]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Buffer size parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.BufferSizeParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam -->
<class name="ConcatSourcesParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ConcatSourcesParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<constructor name="ConcatSourcesParam" type="org.apache.hadoop.fs.Path[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAbsolutePaths" return="java.lang.String[]"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[@return the absolute paths.]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[The concat source paths parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.CreateParentParam -->
<class name="CreateParentParam" extends="org.apache.hadoop.hdfs.web.resources.BooleanParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CreateParentParam" type="java.lang.Boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="CreateParentParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Create Parent parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.CreateParentParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.DelegationParam -->
<class name="DelegationParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DelegationParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Represents delegation token used for authentication.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.DelegationParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.DeleteOpParam -->
<class name="DeleteOpParam" extends="org.apache.hadoop.hdfs.web.resources.HttpOpParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DeleteOpParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Http DELETE operation parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.DeleteOpParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.DeleteOpParam.Op -->
<class name="DeleteOpParam.Op" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op"/>
<method name="values" return="org.apache.hadoop.hdfs.web.resources.DeleteOpParam.Op[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.web.resources.DeleteOpParam.Op"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getType" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRequireAuth" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDoOutput" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRedirect" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getExpectedHttpResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toQueryString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Delete operations.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.DeleteOpParam.Op -->
<!-- start class org.apache.hadoop.hdfs.web.resources.DestinationParam -->
<class name="DestinationParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DestinationParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Destination path parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.DestinationParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.DoAsParam -->
<class name="DoAsParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DoAsParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[DoAs parameter for proxy user.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.DoAsParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.ExceptionHandler -->
<class name="ExceptionHandler" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="javax.ws.rs.ext.ExceptionMapper"/>
<constructor name="ExceptionHandler"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="toResponse" return="javax.ws.rs.core.Response"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="e" type="java.lang.Exception"/>
</method>
<method name="initResponse"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="response" type="javax.servlet.http.HttpServletResponse"/>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Handle exceptions.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.ExceptionHandler -->
<!-- start class org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam -->
<class name="ExcludeDatanodesParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ExcludeDatanodesParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
      <![CDATA[Exclude datanodes parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.FsActionParam -->
<class name="FsActionParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="FsActionParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<constructor name="FsActionParam" type="org.apache.hadoop.fs.permission.FsAction"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
      <![CDATA[{@link FsAction} parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.FsActionParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.GetOpParam -->
<class name="GetOpParam" extends="org.apache.hadoop.hdfs.web.resources.HttpOpParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="GetOpParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Http GET operation parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.GetOpParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.GetOpParam.Op -->
<class name="GetOpParam.Op" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op"/>
<method name="values" return="org.apache.hadoop.hdfs.web.resources.GetOpParam.Op[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.web.resources.GetOpParam.Op"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getType" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRequireAuth" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDoOutput" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRedirect" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getExpectedHttpResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toQueryString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Get operations.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.GetOpParam.Op -->
<!-- start class org.apache.hadoop.hdfs.web.resources.GroupParam -->
<class name="GroupParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="GroupParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Group parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.GroupParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.HttpOpParam -->
<class name="HttpOpParam" extends="org.apache.hadoop.hdfs.web.resources.EnumParam"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getValueString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the parameter value as a string]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Http operation parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.HttpOpParam -->
<!-- start interface org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op -->
<interface name="HttpOpParam.Op" abstract="true"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getType" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the Http operation type.]]>
</doc>
</method>
<method name="getRequireAuth" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true if the operation cannot use a token]]>
</doc>
</method>
<method name="getDoOutput" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true if the operation will do output.]]>
</doc>
</method>
<method name="getRedirect" return="boolean"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true if the operation will be redirected.]]>
</doc>
</method>
<method name="getExpectedHttpResponseCode" return="int"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
      <![CDATA[@return the expected http response code.]]>
</doc>
</method>
<method name="toQueryString" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return a URI query string.]]>
</doc>
</method>
<doc>
<![CDATA[Http operation interface.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op -->
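  <!-- Hedged sketch of the HttpOpParam.Op contract above, exercised through the concrete
       GetOpParam.Op enum documented earlier in this package. The OPEN constant is assumed
       from the WebHDFS REST API, since enum constants are not enumerated in this report.

       import org.apache.hadoop.hdfs.web.resources.GetOpParam;

       public class OpContractSketch {
         public static void main(String[] args) {
           GetOpParam.Op op = GetOpParam.Op.valueOf("OPEN");
           System.out.println(op.getType());        // HTTP method type of the operation
           System.out.println(op.getRedirect());    // whether the call is redirected to a datanode
           System.out.println(op.toQueryString());  // query fragment such as op=OPEN
         }
       }
  -->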
<!-- start class org.apache.hadoop.hdfs.web.resources.HttpOpParam.TemporaryRedirectOp -->
<class name="HttpOpParam.TemporaryRedirectOp" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op"/>
<method name="valueOf" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.TemporaryRedirectOp"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="op" type="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op"/>
<doc>
<![CDATA[Get an object for the given op.]]>
</doc>
</method>
<method name="getType" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRequireAuth" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDoOutput" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRedirect" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getExpectedHttpResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Override the original expected response with "Temporary Redirect".]]>
</doc>
</method>
<method name="toQueryString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Expects HTTP response 307 "Temporary Redirect".]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.HttpOpParam.TemporaryRedirectOp -->
<!-- start class org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type -->
<class name="HttpOpParam.Type" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
      <![CDATA[Http operation types.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type -->
<!-- start class org.apache.hadoop.hdfs.web.resources.LengthParam -->
<class name="LengthParam" extends="org.apache.hadoop.hdfs.web.resources.LongParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="LengthParam" type="java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="LengthParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Length parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.LengthParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.ModificationTimeParam -->
<class name="ModificationTimeParam" extends="org.apache.hadoop.hdfs.web.resources.LongParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ModificationTimeParam" type="java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="ModificationTimeParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Modification time parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.ModificationTimeParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam -->
<class name="NamenodeAddressParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NamenodeAddressParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<constructor name="NamenodeAddressParam" type="org.apache.hadoop.hdfs.server.namenode.NameNode"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct an object using the RPC address of the given namenode.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Namenode RPC address parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.NewLengthParam -->
<class name="NewLengthParam" extends="org.apache.hadoop.hdfs.web.resources.LongParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NewLengthParam" type="java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="NewLengthParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[NewLength parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.NewLengthParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.OffsetParam -->
<class name="OffsetParam" extends="org.apache.hadoop.hdfs.web.resources.LongParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="OffsetParam" type="java.lang.Long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="OffsetParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getOffset" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Offset parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.OffsetParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam -->
<class name="OldSnapshotNameParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="OldSnapshotNameParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
      <![CDATA[The old snapshot name parameter for the renameSnapshot operation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.OverwriteParam -->
<class name="OverwriteParam" extends="org.apache.hadoop.hdfs.web.resources.BooleanParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="OverwriteParam" type="java.lang.Boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="OverwriteParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Overwrite parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.OverwriteParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.OwnerParam -->
<class name="OwnerParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="OwnerParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Owner parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.OwnerParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.Param -->
<class name="Param" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="toSortedString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="separator" type="java.lang.String"/>
<param name="parameters" type="org.apache.hadoop.hdfs.web.resources.Param[]"/>
<doc>
<![CDATA[Convert the parameters to a sorted String.
@param separator URI parameter separator character
@param parameters parameters to encode into a string
@return the encoded URI string]]>
</doc>
</method>
<method name="getValue" return="T"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the parameter value.]]>
</doc>
</method>
<method name="getValueString" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the parameter value as a string]]>
</doc>
</method>
<method name="getName" return="java.lang.String"
abstract="true" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the parameter name.]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Base class of parameters.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.Param -->
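  <!-- Hedged sketch of Param.toSortedString described above: combining two of the concrete
       parameter classes from this package into a sorted WebHDFS query fragment. The chosen
       parameter values are arbitrary examples, not defaults taken from this report.

       import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
       import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
       import org.apache.hadoop.hdfs.web.resources.Param;

       public class QueryStringSketch {
         public static void main(String[] args) {
           // Parameters are sorted by name and each encoded one is preceded by the separator.
           String query = Param.toSortedString("&",
               new OverwriteParam(Boolean.TRUE),
               new BufferSizeParam(4096));
           System.out.println(query);  // typically: &buffersize=4096&overwrite=true
         }
       }
  -->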
<!-- start class org.apache.hadoop.hdfs.web.resources.PermissionParam -->
<class name="PermissionParam" extends="org.apache.hadoop.hdfs.web.resources.ShortParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="PermissionParam" type="org.apache.hadoop.fs.permission.FsPermission"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="PermissionParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getDefaultFsPermission" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the default FsPermission.]]>
</doc>
</method>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFsPermission" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the represented FsPermission.]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
      <![CDATA[Permission parameter, using a Short to represent an FsPermission.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.PermissionParam -->
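<!-- Illustrative Java sketch of PermissionParam usage (assumptions: the octal value shown
     is just a typical permission; FsPermission is org.apache.hadoop.fs.permission.FsPermission):

       // Wrap an FsPermission for use as a WebHDFS request parameter.
       FsPermission perm = new FsPermission((short) 0755);
       PermissionParam p = new PermissionParam(perm);
       FsPermission roundTripped = p.getFsPermission();  // recover the represented FsPermission
-->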
<!-- start class org.apache.hadoop.hdfs.web.resources.PostOpParam -->
<class name="PostOpParam" extends="org.apache.hadoop.hdfs.web.resources.HttpOpParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="PostOpParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Http POST operation parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.PostOpParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.PostOpParam.Op -->
<class name="PostOpParam.Op" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op"/>
<method name="values" return="org.apache.hadoop.hdfs.web.resources.PostOpParam.Op[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.web.resources.PostOpParam.Op"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getType" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRequireAuth" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDoOutput" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRedirect" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getExpectedHttpResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toQueryString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return a URI query string.]]>
</doc>
</method>
<doc>
<![CDATA[Post operations.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.PostOpParam.Op -->
<!-- start class org.apache.hadoop.hdfs.web.resources.PutOpParam -->
<class name="PutOpParam" extends="org.apache.hadoop.hdfs.web.resources.HttpOpParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="PutOpParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
      <![CDATA[Http PUT operation parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.PutOpParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.PutOpParam.Op -->
<class name="PutOpParam.Op" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op"/>
<method name="values" return="org.apache.hadoop.hdfs.web.resources.PutOpParam.Op[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.web.resources.PutOpParam.Op"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="getType" return="org.apache.hadoop.hdfs.web.resources.HttpOpParam.Type"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRequireAuth" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDoOutput" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRedirect" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getExpectedHttpResponseCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toQueryString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Put operations.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.PutOpParam.Op -->
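<!-- Illustrative Java sketch of PutOpParam.Op usage (assumption: a CREATE constant exists in
     this enum, as in the WebHDFS REST API; the response-code comment is an example only):

       PutOpParam.Op op = PutOpParam.Op.valueOf("CREATE");
       boolean sendsBody = op.getDoOutput();            // whether the client writes a request body
       int expected = op.getExpectedHttpResponseCode(); // e.g. 201 Created for CREATE
       String q = op.toQueryString();                   // query fragment such as op=CREATE
-->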
<!-- start class org.apache.hadoop.hdfs.web.resources.RecursiveParam -->
<class name="RecursiveParam" extends="org.apache.hadoop.hdfs.web.resources.BooleanParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="RecursiveParam" type="java.lang.Boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="RecursiveParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Recursive parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.RecursiveParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam -->
<class name="RenameOptionSetParam" extends="org.apache.hadoop.hdfs.web.resources.EnumSetParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="RenameOptionSetParam" type="org.apache.hadoop.fs.Options.Rename[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param options rename options.]]>
</doc>
</constructor>
<constructor name="RenameOptionSetParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Rename option set parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.RenewerParam -->
<class name="RenewerParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="RenewerParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Renewer parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.RenewerParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.ReplicationParam -->
<class name="ReplicationParam" extends="org.apache.hadoop.hdfs.web.resources.ShortParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ReplicationParam" type="java.lang.Short"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param value the parameter value.]]>
</doc>
</constructor>
<constructor name="ReplicationParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getValue" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
      <![CDATA[@return the value or, if it is null, the default from conf.]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[Replication parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.ReplicationParam -->
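<!-- Illustrative Java sketch of ReplicationParam.getValue(Configuration) usage (assumptions:
     constructing with the DEFAULT string leaves the value unset, and the cluster default is
     the one usually configured via dfs.replication):

       Configuration conf = new Configuration();
       ReplicationParam rp = new ReplicationParam(ReplicationParam.DEFAULT);
       // Falls back to the default replication from conf when the parameter value is null.
       short replication = rp.getValue(conf);
-->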
<!-- start class org.apache.hadoop.hdfs.web.resources.SnapshotNameParam -->
<class name="SnapshotNameParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="SnapshotNameParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
      <![CDATA[The snapshot name parameter for the createSnapshot and deleteSnapshot operations.
 Also used to indicate the new snapshot name for the renameSnapshot operation.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.SnapshotNameParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.TokenArgumentParam -->
<class name="TokenArgumentParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="TokenArgumentParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str A string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
      <![CDATA[Represents the delegation token parameter as a method argument. This is
 different from {@link DelegationParam}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.TokenArgumentParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.TokenKindParam -->
<class name="TokenKindParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="TokenKindParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.TokenKindParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.TokenServiceParam -->
<class name="TokenServiceParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="TokenServiceParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
        <![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.TokenServiceParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.UriFsPathParam -->
<class name="UriFsPathParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UriFsPathParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAbsolutePath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the absolute path.]]>
</doc>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<doc>
<![CDATA[The FileSystem path parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.UriFsPathParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.UserParam -->
<class name="UserParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UserParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<constructor name="UserParam" type="org.apache.hadoop.security.UserGroupInformation"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Construct an object from a UGI.]]>
</doc>
</constructor>
<method name="getUserPatternDomain" return="org.apache.hadoop.hdfs.web.resources.StringParam.Domain"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setUserPatternDomain"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="dm" type="org.apache.hadoop.hdfs.web.resources.StringParam.Domain"/>
</method>
<method name="setUserPattern"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="pattern" type="java.lang.String"/>
</method>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
<doc>
<![CDATA[User parameter.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.UserParam -->
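<!-- Illustrative Java sketch of UserParam usage (assumptions: the regex shown is only an
     example of a user-name pattern, and setUserPattern affects subsequent parsing globally):

       // Optionally tighten the accepted user-name pattern before parsing requests.
       UserParam.setUserPattern("^[A-Za-z_][A-Za-z0-9._]*$");
       UserParam user = new UserParam("hdfs");
       String name = user.getValue();   // "hdfs"
-->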
<!-- start class org.apache.hadoop.hdfs.web.resources.UserProvider -->
<class name="UserProvider" extends="com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="com.sun.jersey.spi.inject.InjectableProvider"/>
<constructor name="UserProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getValue" return="org.apache.hadoop.security.UserGroupInformation"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="context" type="com.sun.jersey.api.core.HttpContext"/>
</method>
<method name="getScope" return="com.sun.jersey.core.spi.component.ComponentScope"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getInjectable" return="com.sun.jersey.spi.inject.Injectable"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="componentContext" type="com.sun.jersey.core.spi.component.ComponentContext"/>
<param name="context" type="javax.ws.rs.core.Context"/>
<param name="type" type="java.lang.reflect.Type"/>
</method>
<doc>
      <![CDATA[Injects user information into HTTP operations.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.UserProvider -->
<!-- start class org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam -->
<class name="XAttrEncodingParam" extends="org.apache.hadoop.hdfs.web.resources.EnumParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="XAttrEncodingParam" type="org.apache.hadoop.fs.XAttrCodec"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="XAttrEncodingParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getValueString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getEncoding" return="org.apache.hadoop.fs.XAttrCodec"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.XAttrNameParam -->
<class name="XAttrNameParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="XAttrNameParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getXAttrName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.XAttrNameParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam -->
<class name="XAttrSetFlagParam" extends="org.apache.hadoop.hdfs.web.resources.EnumSetParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="XAttrSetFlagParam" type="java.util.EnumSet"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="XAttrSetFlagParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor.
@param str a string representation of the parameter value.]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFlag" return="java.util.EnumSet"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam -->
<!-- start class org.apache.hadoop.hdfs.web.resources.XAttrValueParam -->
<class name="XAttrValueParam" extends="org.apache.hadoop.hdfs.web.resources.StringParam"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="XAttrValueParam" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getXAttrValue" return="byte[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="NAME" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Parameter name.]]>
</doc>
</field>
<field name="DEFAULT" type="java.lang.String"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Default parameter value.]]>
</doc>
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.web.resources.XAttrValueParam -->
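<!-- Illustrative Java sketch of the XAttr parameter classes (assumptions: the attribute name
     and the hex-encoded value are arbitrary examples, and the EnumSet element type is
     org.apache.hadoop.fs.XAttrSetFlag; EnumSet is java.util.EnumSet):

       XAttrNameParam name = new XAttrNameParam("user.checksum");
       XAttrValueParam value = new XAttrValueParam("0x313233");
       byte[] raw = value.getXAttrValue();   // may throw IOException on a bad encoding
       XAttrSetFlagParam flag =
           new XAttrSetFlagParam(EnumSet.of(XAttrSetFlag.CREATE));
-->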
</package>
</api>