HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client implementation. Contributed by Takuya Fukudome

Tsz-Wo Nicholas Sze 2015-04-07 13:59:48 -07:00
parent 0522d6970d
commit a4f0eea261
15 changed files with 34 additions and 0 deletions
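
All 15 files get the same treatment: an import of
org.apache.hadoop.classification.InterfaceAudience plus a
@InterfaceAudience.Private annotation on the type. The annotation marks a
class or interface as internal to the Hadoop project, excluding it from the
public-API compatibility guarantees. A minimal sketch of the pattern applied
below (the class name here is hypothetical, not part of this commit):

    import org.apache.hadoop.classification.InterfaceAudience;

    /** An HDFS client-side helper that is not part of the public API. */
    @InterfaceAudience.Private  // intended audience: Hadoop project code only
    class ExampleClientHelper {
      // internals may change between releases without deprecation notice
    }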

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -64,6 +64,9 @@ Release 2.8.0 - UNRELEASED
HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of
subclassing. (Li Bo via szetszwo)
HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client
implementation. (Takuya Fukudome via szetszwo)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
* A BlockReader is responsible for reading a single block
* from a single datanode.
*/
@InterfaceAudience.Private
public interface BlockReader extends ByteBufferReadable {
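
A hedged sketch of driving this interface (how the reader is obtained is
elided; readAll(byte[], int, int) and close() are existing BlockReader
methods, and readAll is referenced again later in this commit):

    // Sketch: copy one block's bytes through a BlockReader, then release it.
    byte[] buf = new byte[64 * 1024];
    int n = blockReader.readAll(buf, 0, buf.length);  // reads until buf fills or EOF
    blockReader.close();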

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -24,6 +24,7 @@ import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSClient.Conf;
@@ -57,6 +58,7 @@ import com.google.common.base.Preconditions;
* <li>The client reads the file descriptors.</li>
* </ul>
*/
@InterfaceAudience.Private
class BlockReaderLocal implements BlockReader {
static final Log LOG = LogFactory.getLog(BlockReaderLocal.class);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java

@@ -31,6 +31,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.StorageType;
@@ -73,6 +74,7 @@ import org.apache.htrace.TraceScope;
* if security is enabled.</li>
* </ul>
*/
@InterfaceAudience.Private
class BlockReaderLocalLegacy implements BlockReader {
private static final Log LOG = LogFactory.getLog(BlockReaderLocalLegacy.class);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java

@@ -17,11 +17,14 @@
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.IOException;
/**
* For sharing between the local and remote block reader implementations.
*/
@InterfaceAudience.Private
class BlockReaderUtil {
/* See {@link BlockReader#readAll(byte[], int, int)} */

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
@@ -29,6 +30,7 @@ import org.apache.hadoop.fs.RemoteIterator;
* Provides an iterator interface for listCorruptFileBlocks.
* This class is used by DistributedFileSystem and Hdfs.
*/
@InterfaceAudience.Private
public class CorruptFileBlockIterator implements RemoteIterator<Path> {
private final DFSClient dfs;
private final String path;
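
The javadoc above names DistributedFileSystem as the caller; a hedged usage
sketch through that public entry point (fs is assumed to be a
DistributedFileSystem instance, and the path is hypothetical):

    // Sketch: walk corrupt file paths via the RemoteIterator contract.
    RemoteIterator<Path> corrupt = fs.listCorruptFileBlocks(new Path("/user"));
    while (corrupt.hasNext()) {
      System.out.println("corrupt file: " + corrupt.next());
    }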

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java

@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import java.util.concurrent.atomic.AtomicLong;
/**
@@ -24,6 +26,7 @@ import java.util.concurrent.atomic.AtomicLong;
* This class has a number of metrics variables that are publicly accessible,
* we can grab them from client side, like HBase.
*/
@InterfaceAudience.Private
public class DFSHedgedReadMetrics {
public final AtomicLong hedgedReadOps = new AtomicLong();
public final AtomicLong hedgedReadOpsWin = new AtomicLong();
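
The javadoc above notes these counters are meant to be read from the client
side; a hedged sketch, assuming a DFSClient handle and its
getHedgedReadMetrics() accessor:

    // Sketch: sample the hedged read counters exposed by this class.
    DFSHedgedReadMetrics m = dfsClient.getHedgedReadMetrics();
    long started = m.hedgedReadOps.get();     // hedged reads issued
    long won     = m.hedgedReadOpsWin.get();  // hedged read finished first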

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java

@@ -23,6 +23,7 @@ import java.nio.BufferOverflowException;
import java.nio.channels.ClosedChannelException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
@@ -34,6 +35,7 @@ import org.apache.htrace.Span;
* to send them to datanodes.
****************************************************************/
@InterfaceAudience.Private
class DFSPacket {
public static final long HEART_BEAT_SEQNO = -1L;
private static long[] EMPTY = new long[0];

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -40,6 +40,8 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -109,6 +111,7 @@ import com.google.common.cache.RemovalNotification;
*
*********************************************************************/
@InterfaceAudience.Private
class DataStreamer extends Daemon {
/**
* Create a socket for a write pipeline

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java

@@ -19,11 +19,13 @@ package org.apache.hadoop.hdfs;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
/**
* An immutable key which identifies a block.
*/
@InterfaceAudience.Private
final public class ExtendedBlockId {
/**
* The block ID for this block.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

@@ -34,6 +34,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -56,6 +57,7 @@ import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
@InterfaceAudience.Private
public class HAUtil {
private static final Log LOG =

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java

@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
@@ -35,6 +36,7 @@ import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
@InterfaceAudience.Private
public class KeyProviderCache {
public static final Log LOG = LogFactory.getLog(KeyProviderCache.class);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java

@@ -30,6 +30,7 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
@@ -68,6 +69,7 @@ import com.google.common.annotations.VisibleForTesting;
* </ul>
* </p>
*/
@InterfaceAudience.Private
class LeaseRenewer {
static final Log LOG = LogFactory.getLog(LeaseRenewer.class);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -40,6 +40,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient.Conf;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -94,6 +95,7 @@ import com.google.common.base.Preconditions;
* {@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
* create either an HA- or non-HA-enabled client proxy as appropriate.
*/
@InterfaceAudience.Private
public class NameNodeProxies {
private static final Log LOG = LogFactory.getLog(NameNodeProxies.class);
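
A hedged sketch of the createProxy entry point named in the javadoc above
(the nameservice URI is hypothetical; ClientProtocol is
org.apache.hadoop.hdfs.protocol.ClientProtocol):

    // Sketch: build a ClientProtocol proxy; HA vs. non-HA is chosen from conf.
    Configuration conf = new Configuration();
    URI nnUri = URI.create("hdfs://mycluster");  // hypothetical HA nameservice
    NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
        NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class);
    ClientProtocol namenode = info.getProxy();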

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java

@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;
@InterfaceAudience.Private
public interface RemotePeerFactory {
/**
* @param addr The address to connect to.