From 778146eaae5b1e17928a1f26fb1e46536a6ee510 Mon Sep 17 00:00:00 2001
From: Uma Mahesh
Date: Mon, 4 Jan 2016 14:32:09 -0800
Subject: [PATCH] HADOOP-12658. Clear javadoc and check style issues around
 DomainSocket. Contributed by Kai Zheng

---
 .../hadoop-common/CHANGES.txt                 |  3 +
 .../java/org/apache/hadoop/io/IOUtils.java    |  5 +-
 .../apache/hadoop/net/unix/DomainSocket.java  | 59 ++++---------
 .../hadoop/hdfs/BlockReaderFactory.java       |  2 +-
 .../shortcircuit/DfsClientShmManager.java     |  2 +-
 .../hdfs/server/datanode/DataXceiver.java     |  9 +--
 .../server/datanode/ShortCircuitRegistry.java | 14 ++++-
 7 files changed, 37 insertions(+), 57 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 03ba78b402d..1a53ee86b2b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1025,6 +1025,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12686. Update FileSystemShell documentation to mention the meaning
     of each columns of fs -du. (Daisuke Kobayashi via aajisaka)
 
+    HADOOP-12658. Clear javadoc and check style issues around DomainSocket
+    (Kai Zheng via umamahesh)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index e6c00c940bb..451163cc140 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -55,7 +55,8 @@ public class IOUtils {
    * @param close whether or not close the InputStream and
    * OutputStream at the end. The streams are closed in the finally clause.
    */
-  public static void copyBytes(InputStream in, OutputStream out, int buffSize, boolean close)
+  public static void copyBytes(InputStream in, OutputStream out,
+      int buffSize, boolean close)
     throws IOException {
     try {
       copyBytes(in, out, buffSize);
@@ -194,7 +195,7 @@ public static int wrappedReadForCompressedData(InputStream is, byte[] buf,
    * @throws IOException if it could not read requested number of bytes
    * for any reason (including EOF)
    */
-  public static void readFully(InputStream in, byte buf[],
+  public static void readFully(InputStream in, byte[] buf,
       int off, int len) throws IOException {
     int toRead = len;
     while (toRead > 0) {
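
The IOUtils hunks are pure checkstyle cleanups: long signatures are wrapped at
80 columns, and the C-style array declaration "byte buf[]" becomes the
Java-style "byte[] buf". A minimal usage sketch of the two methods touched
above, not part of the patch (the class name and file paths are made up for
illustration):

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    import org.apache.hadoop.io.IOUtils;

    public class IOUtilsExample {
      public static void main(String[] args) throws IOException {
        // copyBytes with close=true closes both streams in its finally
        // clause, so the caller does not close them explicitly.
        InputStream in = new FileInputStream("/tmp/src.dat");
        OutputStream out = new FileOutputStream("/tmp/dst.dat");
        IOUtils.copyBytes(in, out, 4096, true);

        // readFully either fills buf[0..16) completely or throws
        // IOException, including on a premature EOF.
        byte[] buf = new byte[16];  // Java-style declaration, the form the
                                    // patch standardizes on.
        try (InputStream in2 = new FileInputStream("/tmp/dst.dat")) {
          IOUtils.readFully(in2, buf, 0, buf.length);
        }
      }
    }
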
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
index f1035e2dd64..8379fd1a4bb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
@@ -63,7 +63,8 @@ public class DomainSocket implements Closeable {
   static Log LOG = LogFactory.getLog(DomainSocket.class);
 
   /**
-   * True only if we should validate the paths used in {@link DomainSocket#bind()}
+   * True only if we should validate the paths used in
+   * {@link DomainSocket#bindAndListen(String)}
    */
   private static boolean validateBindPaths = true;
 
@@ -220,11 +221,11 @@ public static DomainSocket[] socketpair() throws IOException {
    *
    * This method can only be used on sockets that were bound with bind().
    *
-   * @return The new connection.
-   * @throws IOException If there was an I/O error
-   *                     performing the accept-- such as the
-   *                     socket being closed from under us.
-   * @throws SocketTimeoutException If the accept timed out.
+   * @return The new connection.
+   * @throws IOException If there was an I/O error performing the accept--
+   *                     such as the socket being closed from under us.
+   *                     Particularly when the accept is timed out, it throws
+   *                     SocketTimeoutException.
    */
   public DomainSocket accept() throws IOException {
     refCount.reference();
@@ -238,13 +239,15 @@ public DomainSocket accept() throws IOException {
     }
   }
 
-  private static native int connect0(String path);
+  private static native int connect0(String path) throws IOException;
 
   /**
    * Create a new DomainSocket connected to the given path.
    *
-   * @param path The path to connect to.
-   * @return The new DomainSocket.
+   * @param path The path to connect to.
+   * @throws IOException If there was an I/O error performing the connect.
+   *
+   * @return The new DomainSocket.
    */
   public static DomainSocket connect(String path) throws IOException {
     if (loadingFailureReason != null) {
@@ -425,47 +428,11 @@ public void sendFileDescriptors(FileDescriptor descriptors[],
 
   private static native int receiveFileDescriptors0(int fd,
       FileDescriptor[] descriptors,
-      byte jbuf[], int offset, int length) throws IOException;
-
-  /**
-   * Receive some FileDescriptor objects from the process on the other side of
-   * this socket.
-   *
-   * @param descriptors (output parameter) Array of FileDescriptors.
-   *                    We will fill as many slots as possible with file
-   *                    descriptors passed from the remote process. The
-   *                    other slots will contain NULL.
-   * @param jbuf        (output parameter) Buffer to read into.
-   *                    The UNIX domain sockets API requires you to read
-   *                    at least one byte from the remote process, even
-   *                    if all you care about is the file descriptors
-   *                    you will receive.
-   * @param offset      Offset into the byte buffer to load data
-   * @param length      Length of the byte buffer to use for data
-   *
-   * @return            The number of bytes read. This will be -1 if we
-   *                    reached EOF (similar to SocketInputStream);
-   *                    otherwise, it will be positive.
-   * @throws IOException if there was an I/O error.
-   */
-  public int receiveFileDescriptors(FileDescriptor[] descriptors,
-      byte jbuf[], int offset, int length) throws IOException {
-    refCount.reference();
-    boolean exc = true;
-    try {
-      int nBytes = receiveFileDescriptors0(fd, descriptors, jbuf, offset, length);
-      exc = false;
-      return nBytes;
-    } finally {
-      unreference(exc);
-    }
-  }
+      byte[] buf, int offset, int length) throws IOException;
 
   /**
    * Receive some FileDescriptor objects from the process on the other side of
    * this socket, and wrap them in FileInputStream objects.
-   *
-   * See {@link DomainSocket#recvFileInputStreams(ByteBuffer)}
    */
   public int recvFileInputStreams(FileInputStream[] streams, byte buf[],
       int offset, int length) throws IOException {
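
The reworked javadoc above documents the accept() and connect() contracts:
both throw IOException on I/O errors, and accept() signals a timeout with the
SocketTimeoutException subclass. A minimal server-side sketch of that
contract, not part of the patch (the class name and socket path are made up
for illustration):

    import java.io.IOException;
    import java.net.SocketTimeoutException;

    import org.apache.hadoop.net.unix.DomainSocket;

    public class AcceptExample {
      public static void main(String[] args) throws IOException {
        // bindAndListen() is the bind step the validateBindPaths javadoc
        // now links to.
        DomainSocket server = DomainSocket.bindAndListen("/tmp/example.sock");
        try {
          // Per the revised javadoc: a timed-out accept surfaces as
          // SocketTimeoutException; other failures (such as the socket
          // being closed from under us) surface as plain IOException.
          DomainSocket conn = server.accept();
          conn.close();
        } catch (SocketTimeoutException e) {
          System.err.println("accept timed out: " + e);
        } finally {
          server.close();
        }
      }
    }
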
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index c7e2a7d671b..5c7bbd7dfa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -591,7 +591,7 @@ private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer,
     switch (resp.getStatus()) {
     case SUCCESS:
       byte buf[] = new byte[1];
-      FileInputStream fis[] = new FileInputStream[2];
+      FileInputStream[] fis = new FileInputStream[2];
       sock.recvFileInputStreams(fis, buf, 0, buf.length);
       ShortCircuitReplica replica = null;
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index c421fe8b96d..6f8a8fa8f2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -168,7 +168,7 @@ private DfsClientShm requestNewShm(String clientName, DomainPeer peer)
       case SUCCESS:
         DomainSocket sock = peer.getDomainSocket();
         byte buf[] = new byte[1];
-        FileInputStream fis[] = new FileInputStream[1];
+        FileInputStream[] fis = new FileInputStream[1];
         if (sock.recvFileInputStreams(fis, buf, 0, buf.length) < 0) {
           throw new EOFException("got EOF while trying to transfer the " +
               "file descriptor for the shared memory segment.");
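
Both call sites above follow the same receive pattern that the trimmed
DomainSocket javadoc describes: pass a one-byte buffer, because the UNIX
domain sockets API requires reading at least one byte alongside any file
descriptors, and treat a negative return value as EOF. A condensed sketch of
that pattern, not part of the patch (the class and method names are made up):

    import java.io.EOFException;
    import java.io.FileInputStream;
    import java.io.IOException;

    import org.apache.hadoop.net.unix.DomainSocket;

    public class RecvFdExample {
      static FileInputStream receiveOne(DomainSocket sock) throws IOException {
        byte[] buf = new byte[1];
        FileInputStream[] fis = new FileInputStream[1];
        if (sock.recvFileInputStreams(fis, buf, 0, buf.length) < 0) {
          throw new EOFException("got EOF while waiting for a descriptor");
        }
        // Unfilled slots stay null if the peer sent fewer descriptors.
        return fis[0];
      }
    }
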
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 474b40bb48b..94ce6360b64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -425,12 +425,12 @@ private void sendShmSuccessResponse(DomainSocket sock, NewShmInfo shmInfo)
       throws IOException {
     DataNodeFaultInjector.get().sendShortCircuitShmResponse();
     ShortCircuitShmResponseProto.newBuilder().setStatus(SUCCESS).
-        setId(PBHelperClient.convert(shmInfo.shmId)).build().
+        setId(PBHelperClient.convert(shmInfo.getShmId())).build().
         writeDelimitedTo(socketOut);
     // Send the file descriptor for the shared memory segment.
     byte buf[] = new byte[] { (byte)0 };
     FileDescriptor shmFdArray[] =
-        new FileDescriptor[] { shmInfo.stream.getFD() };
+        new FileDescriptor[] {shmInfo.getFileStream().getFD()};
     sock.sendFileDescriptors(shmFdArray, buf, 0, buf.length);
   }
@@ -471,7 +471,8 @@ public void requestShortCircuitShm(String clientName) throws IOException {
             "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
             "op: REQUEST_SHORT_CIRCUIT_SHM," +
             " shmId: %016x%016x, srvID: %s, success: true",
-            clientName, shmInfo.shmId.getHi(), shmInfo.shmId.getLo(),
+            clientName, shmInfo.getShmId().getHi(),
+            shmInfo.getShmId().getLo(),
             datanode.getDatanodeUuid()));
       } else {
         BlockSender.ClientTraceLog.info(String.format(
@@ -490,7 +491,7 @@ public void requestShortCircuitShm(String clientName) throws IOException {
         // bad behavior inside the poll() call. See HADOOP-11802 for details.
         try {
           LOG.warn("Failed to send success response back to the client. " +
-              "Shutting down socket for " + shmInfo.shmId + ".");
+              "Shutting down socket for " + shmInfo.getShmId() + ".");
           sock.shutdown();
         } catch (IOException e) {
           LOG.warn("Failed to shut down socket in error handler", e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index b32c0d167c5..52856af2c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -165,7 +165,7 @@ public ShortCircuitRegistry(Configuration conf) throws IOException {
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS +
           " was set to " + interruptCheck);
     }
-    String shmPaths[] =
+    String[] shmPaths =
         conf.getTrimmedStrings(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS);
     if (shmPaths.length == 0) {
       shmPaths =
@@ -263,14 +263,22 @@ public synchronized String getClientNames(ExtendedBlockId blockId) {
   }
 
   public static class NewShmInfo implements Closeable {
-    public final ShmId shmId;
-    public final FileInputStream stream;
+    private final ShmId shmId;
+    private final FileInputStream stream;
 
     NewShmInfo(ShmId shmId, FileInputStream stream) {
       this.shmId = shmId;
       this.stream = stream;
     }
 
+    public ShmId getShmId() {
+      return shmId;
+    }
+
+    public FileInputStream getFileStream() {
+      return stream;
+    }
+
     @Override
     public void close() throws IOException {
       stream.close();
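
With the fields private, external callers such as DataXceiver go through the
new accessors instead of touching shmId and stream directly. A hypothetical
consumer of the encapsulated NewShmInfo, not part of the patch (the class and
method names here are made up):

    import java.io.FileDescriptor;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;

    public class NewShmInfoExample {
      static FileDescriptor describe(NewShmInfo shmInfo) throws IOException {
        // Direct field access such as shmInfo.shmId no longer compiles
        // outside ShortCircuitRegistry; the getters are the supported path.
        System.out.println("shm segment: " + shmInfo.getShmId());
        return shmInfo.getFileStream().getFD();
      }
    }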