From 6ee0539ede78b640f01c5eac18ded161182a7835 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 29 Sep 2015 17:51:15 -0700
Subject: [PATCH] Revert "HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai."

This reverts commit d5a9a3daa0224249221ffa7b8bd5751ab2feca56.
---
 .../hadoop/hdfs/BlockReaderFactory.java       | 114 ++++++++++++------
 .../apache/hadoop/hdfs/BlockReaderLocal.java  |  77 ++++++++----
 .../hadoop/hdfs/BlockReaderLocalLegacy.java   |  20 ++-
 .../org/apache/hadoop/hdfs/DFSClient.java     |  36 ++++--
 .../apache/hadoop/hdfs/DFSInputStream.java    |  85 ++++++++-----
 .../apache/hadoop/hdfs/DFSOutputStream.java   |  36 ++++--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java |  10 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java  |  47 ++++++--
 .../apache/hadoop/hdfs/RemoteBlockReader.java |   4 +-
 .../hadoop/hdfs/RemoteBlockReader2.java       |  25 ++--
 .../hadoop/hdfs/client/impl/LeaseRenewer.java |  18 ++-
 .../protocol/datatransfer/PacketReceiver.java |   8 +-
 .../hdfs/protocol/datatransfer/Sender.java    |   6 +-
 .../sasl/DataTransferSaslUtil.java            |   8 +-
 .../ClientDatanodeProtocolTranslatorPB.java   |  10 +-
 .../shortcircuit/DfsClientShmManager.java     |  45 +++++--
 .../hdfs/shortcircuit/ShortCircuitCache.java  | 101 ++++++++++++----
 .../shortcircuit/ShortCircuitReplica.java     |  33 +++--
 .../hdfs/shortcircuit/ShortCircuitShm.java    |  14 ++-
 .../hadoop/hdfs/util/ByteArrayManager.java    |  63 ++++++++--
 .../apache/hadoop/hdfs/web/TokenAspect.java   |   8 +-
 .../hadoop/hdfs/web/URLConnectionFactory.java |   4 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    |  28 +++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 -
 24 files changed, 573 insertions(+), 230 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index f249692022f..4f370908bb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -349,13 +349,17 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       if (clientContext.getUseLegacyBlockReaderLocal()) {
         reader = getLegacyBlockReaderLocal();
         if (reader != null) {
-          LOG.trace("{}: returning new legacy block reader local.", this);
+          if (LOG.isTraceEnabled()) {
+            LOG.trace(this + ": returning new legacy block reader local.");
+          }
           return reader;
         }
       } else {
         reader = getBlockReaderLocal();
         if (reader != null) {
-          LOG.trace("{}: returning new block reader local.", this);
+          if (LOG.isTraceEnabled()) {
+            LOG.trace(this + ": returning new block reader local.");
+          }
           return reader;
         }
       }
@@ -363,8 +367,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     if (scConf.isDomainSocketDataTraffic()) {
       reader = getRemoteBlockReaderFromDomain();
       if (reader != null) {
-        LOG.trace("{}: returning new remote block reader using UNIX domain "
-            + "socket on {}", this, pathInfo.getPath());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(this + ": returning new remote block reader using " +
+              "UNIX domain socket on " + pathInfo.getPath());
+        }
         return reader;
       }
     }
@@ -399,8 +405,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
             setVisibleLength(visibleLength).
build(); if (accessor == null) { - LOG.trace("{}: No ReplicaAccessor created by {}", - this, cls.getName()); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": No ReplicaAccessor created by " + + cls.getName()); + } } else { return new ExternalBlockReader(accessor, visibleLength, startOffset); } @@ -419,10 +427,14 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { * first introduced in HDFS-2246. */ private BlockReader getLegacyBlockReaderLocal() throws IOException { - LOG.trace("{}: trying to construct BlockReaderLocalLegacy", this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": trying to construct BlockReaderLocalLegacy"); + } if (!DFSUtilClient.isLocalAddress(inetSocketAddress)) { - LOG.trace("{}: can't construct BlockReaderLocalLegacy because the address" - + "{} is not local", this, inetSocketAddress); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " + + "the address " + inetSocketAddress + " is not local"); + } return null; } if (clientContext.getDisableLegacyBlockReaderLocal()) { @@ -458,8 +470,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { } private BlockReader getBlockReaderLocal() throws InvalidToken { - LOG.trace("{}: trying to construct a BlockReaderLocal for short-circuit " - + " reads.", this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": trying to construct a BlockReaderLocal " + + "for short-circuit reads."); + } if (pathInfo == null) { pathInfo = clientContext.getDomainSocketFactory() .getPathInfo(inetSocketAddress, conf.getShortCircuitConf()); @@ -474,8 +488,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { ShortCircuitReplicaInfo info = cache.fetchOrCreate(key, this); InvalidToken exc = info.getInvalidTokenException(); if (exc != null) { - LOG.trace("{}: got InvalidToken exception while trying to construct " - + "BlockReaderLocal via {}", this, pathInfo.getPath()); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": got InvalidToken exception while trying to " + + "construct BlockReaderLocal via " + pathInfo.getPath()); + } throw exc; } if (info.getReplica() == null) { @@ -511,7 +527,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { createShortCircuitReplicaInfoCallback.createShortCircuitReplicaInfo(); if (info != null) return info; } - LOG.trace("{}: trying to create ShortCircuitReplicaInfo.", this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": trying to create ShortCircuitReplicaInfo."); + } BlockReaderPeer curPeer; while (true) { curPeer = nextDomainPeer(); @@ -526,8 +544,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId()), clientName); if (usedPeer.booleanValue()) { - LOG.trace("{}: allocShmSlot used up our previous socket {}. " - + "Allocating a new one...", this, peer.getDomainSocket()); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": allocShmSlot used up our previous socket " + + peer.getDomainSocket() + ". Allocating a new one..."); + } curPeer = nextDomainPeer(); if (curPeer == null) break; peer = (DomainPeer)curPeer.peer; @@ -542,7 +562,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { if (curPeer.fromCache) { // Handle an I/O error we got when using a cached socket. // These are considered less serious, because the socket may be stale. 
- LOG.debug("{}: closing stale domain peer {}", this, peer, e); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": closing stale domain peer " + peer, e); + } IOUtilsClient.cleanup(LOG, peer); } else { // Handle an I/O error we got when using a newly created socket. @@ -595,7 +617,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId()); if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) { - LOG.trace("Sending receipt verification byte for slot {}", slot); + LOG.trace("Sending receipt verification byte for slot " + slot); sock.getOutputStream().write(0); } replica = new ShortCircuitReplica(key, fis[0], fis[1], cache, @@ -628,7 +650,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { String msg = "access control error while " + "attempting to set up short-circuit access to " + fileName + resp.getMessage(); - LOG.debug("{}:{}", this, msg); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ":" + msg); + } return new ShortCircuitReplicaInfo(new InvalidToken(msg)); default: LOG.warn(this + ": unknown response code " + resp.getStatus() + @@ -660,8 +684,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { " is not usable.", this, pathInfo); return null; } - LOG.trace("{}: trying to create a remote block reader from the UNIX domain " - + "socket at {}", this, pathInfo.getPath()); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": trying to create a remote block reader from the " + + "UNIX domain socket at " + pathInfo.getPath()); + } while (true) { BlockReaderPeer curPeer = nextDomainPeer(); @@ -675,15 +701,19 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { } catch (IOException ioe) { IOUtilsClient.cleanup(LOG, peer); if (isSecurityException(ioe)) { - LOG.trace("{}: got security exception while constructing a remote " - + " block reader from the unix domain socket at {}", - this, pathInfo.getPath(), ioe); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": got security exception while constructing " + + "a remote block reader from the unix domain socket at " + + pathInfo.getPath(), ioe); + } throw ioe; } if (curPeer.fromCache) { // Handle an I/O error we got when using a cached peer. These are // considered less serious, because the underlying socket may be stale. - LOG.debug("Closed potentially stale domain peer {}", peer, ioe); + if (LOG.isDebugEnabled()) { + LOG.debug("Closed potentially stale domain peer " + peer, ioe); + } } else { // Handle an I/O error we got when using a newly created domain peer. // We temporarily disable the domain socket path for a few minutes in @@ -717,8 +747,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { * If there was another problem. 
*/ private BlockReader getRemoteBlockReaderFromTcp() throws IOException { - LOG.trace("{}: trying to create a remote block reader from a TCP socket", - this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": trying to create a remote block reader from a " + + "TCP socket"); + } BlockReader blockReader = null; while (true) { BlockReaderPeer curPeer = null; @@ -731,15 +763,19 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { return blockReader; } catch (IOException ioe) { if (isSecurityException(ioe)) { - LOG.trace("{}: got security exception while constructing a remote " - + "block reader from {}", this, peer, ioe); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": got security exception while constructing " + + "a remote block reader from " + peer, ioe); + } throw ioe; } if ((curPeer != null) && curPeer.fromCache) { // Handle an I/O error we got when using a cached peer. These are // considered less serious, because the underlying socket may be // stale. - LOG.debug("Closed potentially stale remote peer {}", peer, ioe); + if (LOG.isDebugEnabled()) { + LOG.debug("Closed potentially stale remote peer " + peer, ioe); + } } else { // Handle an I/O error we got when using a newly created peer. LOG.warn("I/O error constructing remote block reader.", ioe); @@ -772,7 +808,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { if (remainingCacheTries > 0) { Peer peer = clientContext.getPeerCache().get(datanode, true); if (peer != null) { - LOG.trace("nextDomainPeer: reusing existing peer {}", peer); + if (LOG.isTraceEnabled()) { + LOG.trace("nextDomainPeer: reusing existing peer " + peer); + } return new BlockReaderPeer(peer, true); } } @@ -794,18 +832,24 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { if (remainingCacheTries > 0) { Peer peer = clientContext.getPeerCache().get(datanode, false); if (peer != null) { - LOG.trace("nextTcpPeer: reusing existing peer {}", peer); + if (LOG.isTraceEnabled()) { + LOG.trace("nextTcpPeer: reusing existing peer " + peer); + } return new BlockReaderPeer(peer, true); } } try { Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token, datanode); - LOG.trace("nextTcpPeer: created newConnectedPeer {}", peer); + if (LOG.isTraceEnabled()) { + LOG.trace("nextTcpPeer: created newConnectedPeer " + peer); + } return new BlockReaderPeer(peer, false); } catch (IOException e) { - LOG.trace("nextTcpPeer: failed to create newConnectedPeer connected to" - + "{}", datanode); + if (LOG.isTraceEnabled()) { + LOG.trace("nextTcpPeer: failed to create newConnectedPeer " + + "connected to " + datanode); + } throw e; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index 10dc35cb142..62e7af6322f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -412,10 +412,17 @@ class BlockReaderLocal implements BlockReader { public synchronized int read(ByteBuffer buf) throws IOException { boolean canSkipChecksum = createNoChecksumContext(); try { - String traceFormatStr = "read(buf.remaining={}, block={}, filename={}, " - + "canSkipChecksum={})"; - LOG.trace(traceFormatStr + ": starting", - buf.remaining(), block, filename, canSkipChecksum); + String traceString = null; + if 
(LOG.isTraceEnabled()) { + traceString = new StringBuilder(). + append("read("). + append("buf.remaining=").append(buf.remaining()). + append(", block=").append(block). + append(", filename=").append(filename). + append(", canSkipChecksum=").append(canSkipChecksum). + append(")").toString(); + LOG.info(traceString + ": starting"); + } int nRead; try { if (canSkipChecksum && zeroReadaheadRequested) { @@ -424,12 +431,14 @@ class BlockReaderLocal implements BlockReader { nRead = readWithBounceBuffer(buf, canSkipChecksum); } } catch (IOException e) { - LOG.trace(traceFormatStr + ": I/O error", - buf.remaining(), block, filename, canSkipChecksum, e); + if (LOG.isTraceEnabled()) { + LOG.info(traceString + ": I/O error", e); + } throw e; } - LOG.trace(traceFormatStr + ": returning {}", - buf.remaining(), block, filename, canSkipChecksum, nRead); + if (LOG.isTraceEnabled()) { + LOG.info(traceString + ": returning " + nRead); + } return nRead; } finally { if (canSkipChecksum) releaseNoChecksumContext(); @@ -481,8 +490,10 @@ class BlockReaderLocal implements BlockReader { } dataBuf.limit(dataBuf.position()); dataBuf.position(Math.min(dataBuf.position(), slop)); - LOG.trace("loaded {} bytes into bounce buffer from offset {} of {}", - dataBuf.remaining(), oldDataPos, block); + if (LOG.isTraceEnabled()) { + LOG.trace("loaded " + dataBuf.remaining() + " bytes into bounce " + + "buffer from offset " + oldDataPos + " of " + block); + } return dataBuf.limit() != maxReadaheadLength; } @@ -554,10 +565,18 @@ class BlockReaderLocal implements BlockReader { boolean canSkipChecksum = createNoChecksumContext(); int nRead; try { - final String traceFormatStr = "read(arr.length={}, off={}, len={}, " - + "filename={}, block={}, canSkipChecksum={})"; - LOG.trace(traceFormatStr + ": starting", - arr.length, off, len, filename, block, canSkipChecksum); + String traceString = null; + if (LOG.isTraceEnabled()) { + traceString = new StringBuilder(). + append("read(arr.length=").append(arr.length). + append(", off=").append(off). + append(", len=").append(len). + append(", filename=").append(filename). + append(", block=").append(block). + append(", canSkipChecksum=").append(canSkipChecksum). 
+ append(")").toString(); + LOG.trace(traceString + ": starting"); + } try { if (canSkipChecksum && zeroReadaheadRequested) { nRead = readWithoutBounceBuffer(arr, off, len); @@ -565,12 +584,14 @@ class BlockReaderLocal implements BlockReader { nRead = readWithBounceBuffer(arr, off, len, canSkipChecksum); } } catch (IOException e) { - LOG.trace(traceFormatStr + ": I/O error", - arr.length, off, len, filename, block, canSkipChecksum, e); + if (LOG.isTraceEnabled()) { + LOG.trace(traceString + ": I/O error", e); + } throw e; } - LOG.trace(traceFormatStr + ": returning {}", - arr.length, off, len, filename, block, canSkipChecksum, nRead); + if (LOG.isTraceEnabled()) { + LOG.trace(traceString + ": returning " + nRead); + } } finally { if (canSkipChecksum) releaseNoChecksumContext(); } @@ -613,9 +634,11 @@ class BlockReaderLocal implements BlockReader { dataBuf.position(dataBuf.position() + discardedFromBuf); remaining -= discardedFromBuf; } - LOG.trace("skip(n={}, block={}, filename={}): discarded {} bytes from " - + "dataBuf and advanced dataPos by {}", - n, block, filename, discardedFromBuf, remaining); + if (LOG.isTraceEnabled()) { + LOG.trace("skip(n=" + n + ", block=" + block + ", filename=" + + filename + "): discarded " + discardedFromBuf + " bytes from " + + "dataBuf and advanced dataPos by " + remaining); + } dataPos += remaining; return n; } @@ -630,7 +653,9 @@ class BlockReaderLocal implements BlockReader { public synchronized void close() throws IOException { if (closed) return; closed = true; - LOG.trace("close(filename={}, block={})", filename, block); + if (LOG.isTraceEnabled()) { + LOG.trace("close(filename=" + filename + ", block=" + block + ")"); + } replica.unref(); freeDataBufIfExists(); freeChecksumBufIfExists(); @@ -680,9 +705,11 @@ class BlockReaderLocal implements BlockReader { (opts.contains(ReadOption.SKIP_CHECKSUMS) == false); if (anchor) { if (!createNoChecksumContext()) { - LOG.trace("can't get an mmap for {} of {} since SKIP_CHECKSUMS was not " - + "given, we aren't skipping checksums, and the block is not " - + "mlocked.", block, filename); + if (LOG.isTraceEnabled()) { + LOG.trace("can't get an mmap for " + block + " of " + filename + + " since SKIP_CHECKSUMS was not given, " + + "we aren't skipping checksums, and the block is not mlocked."); + } return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java index 4a1828e9dc1..523528761db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java @@ -221,9 +221,11 @@ class BlockReaderLocalLegacy implements BlockReader { File blkfile = new File(pathinfo.getBlockPath()); dataIn = new FileInputStream(blkfile); - LOG.debug("New BlockReaderLocalLegacy for file {} of size {} startOffset " - + "{} length {} short circuit checksum {}", - blkfile, blkfile.length(), startOffset, length, !skipChecksumCheck); + if (LOG.isDebugEnabled()) { + LOG.debug("New BlockReaderLocalLegacy for file " + blkfile + " of size " + + blkfile.length() + " startOffset " + startOffset + " length " + + length + " short circuit checksum " + !skipChecksumCheck); + } if (!skipChecksumCheck) { // get the metadata file @@ -290,7 +292,9 @@ class BlockReaderLocalLegacy implements BlockReader { // channel for the 
DataNode to notify the client that the path has been // invalidated. Therefore, our only option is to skip caching. if (pathinfo != null && !storageType.isTransient()) { - LOG.debug("Cached location of block {} as {}", blk, pathinfo); + if (LOG.isDebugEnabled()) { + LOG.debug("Cached location of block " + blk + " as " + pathinfo); + } localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo); } } catch (IOException e) { @@ -599,7 +603,9 @@ class BlockReaderLocalLegacy implements BlockReader { @Override public synchronized int read(byte[] buf, int off, int len) throws IOException { - LOG.trace("read off {} len {}", off, len); + if (LOG.isTraceEnabled()) { + LOG.trace("read off " + off + " len " + len); + } if (!verifyChecksum) { return dataIn.read(buf, off, len); } @@ -618,7 +624,9 @@ class BlockReaderLocalLegacy implements BlockReader { @Override public synchronized long skip(long n) throws IOException { - LOG.debug("skip {}", n); + if (LOG.isDebugEnabled()) { + LOG.debug("skip " + n); + } if (n <= 0) { return 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 8f3df81f576..92d117cc7a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -419,7 +419,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } final int idx = r.nextInt(localInterfaceAddrs.length); final SocketAddress addr = localInterfaceAddrs[idx]; - LOG.debug("Using local interface {}", addr); + if (LOG.isDebugEnabled()) { + LOG.debug("Using local interface " + addr); + } return addr; } @@ -1214,7 +1216,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, InetSocketAddress[] favoredNodes) throws IOException { checkOpen(); final FsPermission masked = applyUMask(permission); - LOG.debug("{}: masked={}", src, masked); + if(LOG.isDebugEnabled()) { + LOG.debug(src + ": masked=" + masked); + } final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this, src, masked, flag, createParent, replication, blockSize, progress, buffersize, dfsClientConf.createChecksum(checksumOpt), @@ -1811,8 +1815,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, smallBufferSize)); in = new DataInputStream(pair.in); - LOG.debug("write to {}: {}, block={}", - datanodes[j], Op.BLOCK_CHECKSUM, block); + if (LOG.isDebugEnabled()) { + LOG.debug("write to " + datanodes[j] + ": " + + Op.BLOCK_CHECKSUM + ", block=" + block); + } // get block MD5 new Sender(out).blockChecksum(block, lb.getBlockToken()); @@ -1876,10 +1882,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } } catch (InvalidBlockTokenException ibte) { if (i > lastRetriedIndex) { - LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " - + "for file {} for block {} from datanode {}. Will retry the " - + "block once.", - src, block, datanodes[j]); + if (LOG.isDebugEnabled()) { + LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + + "for file " + src + " for block " + block + + " from datanode " + datanodes[j] + + ". 
Will retry the block once."); + } lastRetriedIndex = i; done = true; // actually it's not done; but we'll retry i--; // repeat at i-th block @@ -1933,7 +1941,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, try { sock = socketFactory.createSocket(); String dnAddr = dn.getXferAddr(getConf().isConnectToDnViaHostname()); - LOG.debug("Connecting to datanode {}", dnAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to datanode " + dnAddr); + } NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout); sock.setSoTimeout(timeout); @@ -2553,7 +2563,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, absPermission = applyUMask(null); } - LOG.debug("{}: masked={}", src, absPermission); + if(LOG.isDebugEnabled()) { + LOG.debug(src + ": masked=" + absPermission); + } TraceScope scope = tracer.newScope("mkdir"); try { return namenode.mkdirs(src, absPermission, createParent); @@ -3049,7 +3061,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } }); HEDGED_READ_THREAD_POOL.allowCoreThreadTimeOut(true); - LOG.debug("Using hedged reads; pool threads={}", num); + if (LOG.isDebugEnabled()) { + LOG.debug("Using hedged reads; pool threads=" + num); + } } ThreadPoolExecutor getHedgedReadsThreadPool() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 81e8c270884..7101753b256 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -315,7 +315,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, if (locatedBlocks == null || refresh) { newInfo = dfsClient.getLocatedBlocks(src, 0); } - DFSClient.LOG.debug("newInfo = {}", newInfo); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("newInfo = " + newInfo); + } if (newInfo == null) { throw new IOException("Cannot open filename " + src); } @@ -381,8 +383,10 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, replicaNotFoundCount--; } - DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}" - + " for block {}", datanode, locatedblock.getBlock(), ioe); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode " + + datanode + " for block " + locatedblock.getBlock(), ioe); + } } finally { if (cdp != null) { RPC.stopProxy(cdp); @@ -1063,7 +1067,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, } final String dnAddr = chosenNode.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname()); - DFSClient.LOG.debug("Connecting to datanode {}", dnAddr); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Connecting to datanode " + dnAddr); + } InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr); return new DNAddrPair(chosenNode, targetAddr, storageType); } @@ -1303,8 +1309,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, future.get(); return; } - DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged " - + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Waited " + conf.getHedgedReadThresholdMillis() + + "ms to read from " + chosenNode.info + + "; spawning hedged read"); + } // Ignore this node on 
next go around. ignored.add(chosenNode.info); dfsClient.getHedgedReadMetrics().incHedgedReadOps(); @@ -1331,8 +1340,10 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, .submit(getFromDataNodeCallable); futures.add(oneMoreRequest); } catch (IOException ioe) { - DFSClient.LOG.debug("Failed getting node for hedged read: {}", - ioe.getMessage()); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Failed getting node for hedged read: " + + ioe.getMessage()); + } } // if not succeeded. Submit callables for each datanode in a loop, wait // for a fixed interval and get the result from the fastest one. @@ -1588,8 +1599,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, throw new IOException(errMsg); } } catch (IOException e) {//make following read to retry - DFSClient.LOG.debug("Exception while seek to {} from {} of {} from " - + "{}", targetPos, getCurrentBlock(), src, currentNode, e); + if(DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Exception while seek to " + targetPos + + " from " + getCurrentBlock() + " of " + src + " from " + + currentNode, e); + } } } } @@ -1805,16 +1819,20 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, } else { length63 = 1 + curEnd - curPos; if (length63 <= 0) { - DFSClient.LOG.debug("Unable to perform a zero-copy read from offset {}" - + " of {}; {} bytes left in block. blockPos={}; curPos={};" - + "curEnd={}", - curPos, src, length63, blockPos, curPos, curEnd); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Unable to perform a zero-copy read from offset " + + curPos + " of " + src + "; " + length63 + " bytes left in block. " + + "blockPos=" + blockPos + "; curPos=" + curPos + + "; curEnd=" + curEnd); + } return null; } - DFSClient.LOG.debug("Reducing read length from {} to {} to avoid going " - + "more than one byte past the end of the block. blockPos={}; " - +" curPos={}; curEnd={}", - maxLength, length63, blockPos, curPos, curEnd); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Reducing read length from " + maxLength + + " to " + length63 + " to avoid going more than one byte " + + "past the end of the block. blockPos=" + blockPos + + "; curPos=" + curPos + "; curEnd=" + curEnd); + } } // Make sure that don't go beyond 31-bit offsets in the MappedByteBuffer. int length; @@ -1828,20 +1846,28 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, // So we can't mmap the parts of the block higher than the 2 GB offset. // FIXME: we could work around this with multiple memory maps. // See HDFS-5101. - DFSClient.LOG.debug("Unable to perform a zero-copy read from offset {} " - + " of {}; 31-bit MappedByteBuffer limit exceeded. blockPos={}, " - + "curEnd={}", curPos, src, blockPos, curEnd); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Unable to perform a zero-copy read from offset " + + curPos + " of " + src + "; 31-bit MappedByteBuffer limit " + + "exceeded. blockPos=" + blockPos + ", curEnd=" + curEnd); + } return null; } length = (int)length31; - DFSClient.LOG.debug("Reducing read length from {} to {} to avoid 31-bit " - + "limit. blockPos={}; curPos={}; curEnd={}", - maxLength, length, blockPos, curPos, curEnd); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("Reducing read length from " + maxLength + + " to " + length + " to avoid 31-bit limit. 
" + + "blockPos=" + blockPos + "; curPos=" + curPos + + "; curEnd=" + curEnd); + } } final ClientMmap clientMmap = blockReader.getClientMmap(opts); if (clientMmap == null) { - DFSClient.LOG.debug("unable to perform a zero-copy read from offset {} of" - + " {}; BlockReader#getClientMmap returned null.", curPos, src); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("unable to perform a zero-copy read from offset " + + curPos + " of " + src + "; BlockReader#getClientMmap returned " + + "null."); + } return null; } boolean success = false; @@ -1855,8 +1881,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, synchronized (infoLock) { readStatistics.addZeroCopyBytes(length); } - DFSClient.LOG.debug("readZeroCopy read {} bytes from offset {} via the " - + "zero-copy read path. blockEnd = {}", length, curPos, blockEnd); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("readZeroCopy read " + length + + " bytes from offset " + curPos + " via the zero-copy read " + + "path. blockEnd = " + blockEnd); + } success = true; } finally { if (!success) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 47c4b7e1529..7a40d737036 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -190,9 +190,9 @@ public class DFSOutputStream extends FSOutputSummer this.fileEncryptionInfo = stat.getFileEncryptionInfo(); this.cachingStrategy = new AtomicReference( dfsClient.getDefaultWriteCachingStrategy()); - if (progress != null) { - DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream " - +"{}", src); + if ((progress != null) && DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug( + "Set non-null progress callback on DFSOutputStream " + src); } this.bytesPerChecksum = checksum.getBytesPerChecksum(); @@ -365,9 +365,12 @@ public class DFSOutputStream extends FSOutputSummer final int chunkSize = csize + getChecksumSize(); chunksPerPacket = Math.max(bodySize/chunkSize, 1); packetSize = chunkSize*chunksPerPacket; - DFSClient.LOG.debug("computePacketChunkSize: src={}, chunkSize={}, " - + "chunksPerPacket={}, packetSize={}", - src, chunkSize, chunksPerPacket, packetSize); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("computePacketChunkSize: src=" + src + + ", chunkSize=" + chunkSize + + ", chunksPerPacket=" + chunksPerPacket + + ", packetSize=" + packetSize); + } } protected TraceScope createWriteTraceScope() { @@ -394,10 +397,14 @@ public class DFSOutputStream extends FSOutputSummer if (currentPacket == null) { currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer() .getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false); - DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno={}," - + " src={}, packetSize={}, chunksPerPacket={}, bytesCurBlock={}", - currentPacket.getSeqno(), src, packetSize, chunksPerPacket, - getStreamer().getBytesCurBlock()); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" + + currentPacket.getSeqno() + + ", src=" + src + + ", packetSize=" + packetSize + + ", chunksPerPacket=" + chunksPerPacket + + ", bytesCurBlock=" + getStreamer().getBytesCurBlock()); + } } currentPacket.writeChecksum(checksum, 
ckoff, cklen); @@ -551,9 +558,12 @@ public class DFSOutputStream extends FSOutputSummer int numKept = flushBuffer(!endBlock, true); // bytesCurBlock potentially incremented if there was buffered data - DFSClient.LOG.debug("DFSClient flush(): bytesCurBlock={}, " - + "lastFlushOffset={}, createNewBlock={}", - getStreamer().getBytesCurBlock(), lastFlushOffset, endBlock); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("DFSClient flush(): " + + " bytesCurBlock=" + getStreamer().getBytesCurBlock() + + " lastFlushOffset=" + lastFlushOffset + + " createNewBlock=" + endBlock); + } // Flush only if we haven't already flushed till this offset. if (lastFlushOffset != getStreamer().getBytesCurBlock()) { assert getStreamer().getBytesCurBlock() > lastFlushOffset; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java index 71ce7cc0be9..e275afb33e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java @@ -462,13 +462,19 @@ public class DFSUtilClient { InetAddress addr = targetAddr.getAddress(); Boolean cached = localAddrMap.get(addr.getHostAddress()); if (cached != null) { - LOG.trace("Address {} is {} local", targetAddr, (cached ? "" : "not")); + if (LOG.isTraceEnabled()) { + LOG.trace("Address " + targetAddr + + (cached ? " is local" : " is not local")); + } return cached; } boolean local = NetUtils.isLocalAddress(addr); - LOG.trace("Address {} is {} local", targetAddr, (local ? "" : "not")); + if (LOG.isTraceEnabled()) { + LOG.trace("Address " + targetAddr + + (local ? 
" is local" : " is not local")); + } localAddrMap.put(addr.getHostAddress(), local); return local; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index 4ea1f415a87..d1d8d37f3dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -132,14 +132,18 @@ class DataStreamer extends Daemon { final int length, final DFSClient client) throws IOException { final DfsClientConf conf = client.getConf(); final String dnAddr = first.getXferAddr(conf.isConnectToDnViaHostname()); - LOG.debug("Connecting to datanode {}", dnAddr); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to datanode " + dnAddr); + } final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr); final Socket sock = client.socketFactory.createSocket(); final int timeout = client.getDatanodeReadTimeout(length); NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), conf.getSocketTimeout()); sock.setSoTimeout(timeout); sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); - LOG.debug("Send buf size {}", sock.getSendBufferSize()); + if (LOG.isDebugEnabled()) { + LOG.debug("Send buf size " + sock.getSendBufferSize()); + } return sock; } @@ -480,7 +484,9 @@ class DataStreamer extends Daemon { } private void endBlock() { - LOG.debug("Closing old block {}", block); + if(LOG.isDebugEnabled()) { + LOG.debug("Closing old block " + block); + } this.setName("DataStreamer for file " + src); closeResponder(); closeStream(); @@ -561,11 +567,15 @@ class DataStreamer extends Daemon { // get new block from namenode. if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) { - LOG.debug("Allocating new block"); + if(LOG.isDebugEnabled()) { + LOG.debug("Allocating new block"); + } setPipeline(nextBlockOutputStream()); initDataStreaming(); } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) { - LOG.debug("Append to block {}", block); + if(LOG.isDebugEnabled()) { + LOG.debug("Append to block " + block); + } setupPipelineForAppendOrRecovery(); if (streamerClosed) { continue; @@ -617,7 +627,10 @@ class DataStreamer extends Daemon { } } - LOG.debug("DataStreamer block {} sending packet {}", block, one); + if (LOG.isDebugEnabled()) { + LOG.debug("DataStreamer block " + block + + " sending packet " + one); + } // write out data to remote datanode TraceScope writeScope = dfsClient.getTracer(). @@ -728,7 +741,9 @@ class DataStreamer extends Daemon { TraceScope scope = dfsClient.getTracer(). 
newScope("waitForAckedSeqno"); try { - LOG.debug("Waiting for ack for: {}", seqno); + if (LOG.isDebugEnabled()) { + LOG.debug("Waiting for ack for: " + seqno); + } long begin = Time.monotonicNow(); try { synchronized (dataQueue) { @@ -940,8 +955,8 @@ class DataStreamer extends Daemon { LOG.warn("Slow ReadProcessor read fields took " + duration + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: " + ack + ", targets: " + Arrays.asList(targets)); - } else { - LOG.debug("DFSClient {}", ack); + } else if (LOG.isDebugEnabled()) { + LOG.debug("DFSClient " + ack); } long seqno = ack.getSeqno(); @@ -1161,7 +1176,9 @@ class DataStreamer extends Daemon { } private void addDatanode2ExistingPipeline() throws IOException { - DataTransferProtocol.LOG.debug("lastAckedSeqno = {}", lastAckedSeqno); + if (DataTransferProtocol.LOG.isDebugEnabled()) { + DataTransferProtocol.LOG.debug("lastAckedSeqno = " + lastAckedSeqno); + } /* * Is data transfer necessary? We have the following cases. * @@ -1628,8 +1645,10 @@ class DataStreamer extends Daemon { new HashSet(Arrays.asList(favoredNodes)); for (int i = 0; i < nodes.length; i++) { pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname()); - LOG.debug("{} was chosen by name node (favored={}).", - nodes[i].getXferAddrWithHostname(), pinnings[i]); + if (LOG.isDebugEnabled()) { + LOG.debug(nodes[i].getXferAddrWithHostname() + + " was chosen by name node (favored=" + pinnings[i] + ")."); + } } if (shouldLog && !favoredSet.isEmpty()) { // There is one or more favored nodes that were not allocated. @@ -1768,7 +1787,9 @@ class DataStreamer extends Daemon { packet.addTraceParent(Tracer.getCurrentSpanId()); dataQueue.addLast(packet); lastQueuedSeqno = packet.getSeqno(); - LOG.debug("Queued packet {}", packet.getSeqno()); + if (LOG.isDebugEnabled()) { + LOG.debug("Queued packet " + packet.getSeqno()); + } dataQueue.notifyAll(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index 017be9ffd87..15a5bee7dc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -250,7 +250,9 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader { PacketHeader header = new PacketHeader(); header.readFields(in); - LOG.debug("DFSClient readChunk got header {}", header); + if (LOG.isDebugEnabled()) { + LOG.debug("DFSClient readChunk got header " + header); + } // Sanity check the lengths if (!header.sanityCheck(lastSeqNo)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index ca31e6771d7..7a7932d1b6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -135,9 +135,14 @@ public class RemoteBlockReader2 implements BlockReader { @Override public synchronized int read(byte[] buf, int off, int len) throws IOException { - UUID randomId = (LOG.isTraceEnabled() ? 
UUID.randomUUID() : null); - LOG.trace("Starting read #{} file {} from datanode {}", - randomId, filename, datanodeID.getHostName()); + + UUID randomId = null; + if (LOG.isTraceEnabled()) { + randomId = UUID.randomUUID(); + LOG.trace(String.format("Starting read #%s file %s from datanode %s", + randomId.toString(), this.filename, + this.datanodeID.getHostName())); + } if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) { TraceScope scope = tracer.newScope( @@ -149,7 +154,9 @@ public class RemoteBlockReader2 implements BlockReader { } } - LOG.trace("Finishing read #{}", randomId); + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Finishing read #" + randomId)); + } if (curDataSlice.remaining() == 0) { // we're at EOF now @@ -196,7 +203,9 @@ public class RemoteBlockReader2 implements BlockReader { curDataSlice = packetReceiver.getDataSlice(); assert curDataSlice.capacity() == curHeader.getDataLen(); - LOG.trace("DFSClient readNextPacket got header {}", curHeader); + if (LOG.isTraceEnabled()) { + LOG.trace("DFSClient readNextPacket got header " + curHeader); + } // Sanity check the lengths if (!curHeader.sanityCheck(lastSeqNo)) { @@ -267,8 +276,10 @@ public class RemoteBlockReader2 implements BlockReader { } private void readTrailingEmptyPacket() throws IOException { - LOG.trace("Reading empty packet at end of read"); - + if (LOG.isTraceEnabled()) { + LOG.trace("Reading empty packet at end of read"); + } + packetReceiver.receiveNextPacket(in); PacketHeader trailer = packetReceiver.getHeader(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java index 8457d65027e..c3d2cfc95f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java @@ -308,7 +308,10 @@ public class LeaseRenewer { } LeaseRenewer.this.run(id); } catch(InterruptedException e) { - LOG.debug("LeaseRenewer is interrupted.", e); + if (LOG.isDebugEnabled()) { + LOG.debug(LeaseRenewer.this.getClass().getSimpleName() + + " is interrupted.", e); + } } finally { synchronized(LeaseRenewer.this) { Factory.INSTANCE.remove(LeaseRenewer.this); @@ -396,7 +399,9 @@ public class LeaseRenewer { } if (daemonCopy != null) { - LOG.debug("Wait for lease checker to terminate"); + if(LOG.isDebugEnabled()) { + LOG.debug("Wait for lease checker to terminate"); + } daemonCopy.join(); } } @@ -419,11 +424,16 @@ public class LeaseRenewer { //skip if current client name is the same as the previous name. 
if (!c.getClientName().equals(previousName)) { if (!c.renewLease()) { - LOG.debug("Did not renew lease for client {}", c); + if (LOG.isDebugEnabled()) { + LOG.debug("Did not renew lease for client " + + c); + } continue; } previousName = c.getClientName(); - LOG.debug("Lease renewed for client {}", previousName); + if (LOG.isDebugEnabled()) { + LOG.debug("Lease renewed for client " + previousName); + } } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java index e6709d9fd2d..c4093b1bd69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java @@ -147,9 +147,11 @@ public class PacketReceiver implements Closeable { throw new IOException("Invalid header length " + headerLen); } - LOG.trace("readNextPacket: dataPlusChecksumLen={}, headerLen={}", - dataPlusChecksumLen, headerLen); - + if (LOG.isTraceEnabled()) { + LOG.trace("readNextPacket: dataPlusChecksumLen = " + dataPlusChecksumLen + + " headerLen = " + headerLen); + } + // Sanity check the buffer size so we don't allocate too much memory // and OOME. int totalLen = payloadLen + headerLen; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index d2bc3485cbb..e8562113b8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -73,8 +73,10 @@ public class Sender implements DataTransferProtocol { private static void send(final DataOutputStream out, final Op opcode, final Message proto) throws IOException { - LOG.trace("Sending DataTransferOp {}: {}", - proto.getClass().getSimpleName(), proto); + if (LOG.isTraceEnabled()) { + LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName() + + ": " + proto); + } op(out, opcode); proto.writeDelimitedTo(out); out.flush(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index 006d304ad7a..256caffea3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -332,9 +332,11 @@ public final class DataTransferSaslUtil { public static IOStreamPair createStreamPair(Configuration conf, CipherOption cipherOption, OutputStream out, InputStream in, boolean isServer) throws IOException { - LOG.debug("Creating IOStreamPair of CryptoInputStream and " - + "CryptoOutputStream."); - CryptoCodec codec = CryptoCodec.getInstance(conf, + if (LOG.isDebugEnabled()) { + LOG.debug("Creating IOStreamPair of CryptoInputStream and " + + "CryptoOutputStream."); + } + CryptoCodec codec = CryptoCodec.getInstance(conf, 
cipherOption.getCipherSuite()); byte[] inKey = cipherOption.getInKey(); byte[] inIv = cipherOption.getInIv(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 24e1dd22031..f76427521fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -130,7 +130,9 @@ public class ClientDatanodeProtocolTranslatorPB implements throws IOException { final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname); InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr); - LOG.debug("Connecting to datanode {} addr={}", dnAddr, addr); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr); + } rpcProxy = createClientDatanodeProtocolProxy(addr, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout); @@ -141,8 +143,10 @@ public class ClientDatanodeProtocolTranslatorPB implements boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException { final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname); InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr); - LOG.debug("Connecting to datanode {} addr={}", dnAddr, addr); - + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr); + } + // Since we're creating a new UserGroupInformation here, we know that no // future RPC proxies will be able to re-use the same connection. And // usages of this proxy tend to be one-off calls. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index 4ffc1085762..f70398aecda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -129,13 +129,18 @@ public class DfsClientShmManager implements Closeable { ShmId shmId = shm.getShmId(); Slot slot = shm.allocAndRegisterSlot(blockId); if (shm.isFull()) { - LOG.trace("{}: pulled the last slot {} out of {}", - this, slot.getSlotIdx(), shm); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": pulled the last slot " + slot.getSlotIdx() + + " out of " + shm); + } DfsClientShm removedShm = notFull.remove(shmId); Preconditions.checkState(removedShm == shm); full.put(shmId, shm); } else { - LOG.trace("{}: pulled slot {} out of {}", this, slot.getSlotIdx(), shm); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": pulled slot " + slot.getSlotIdx() + + " out of " + shm); + } } return slot; } @@ -182,7 +187,9 @@ public class DfsClientShmManager implements Closeable { DfsClientShm shm = new DfsClientShm(PBHelperClient.convert(resp.getId()), fis[0], this, peer); - LOG.trace("{}: createNewShm: created {}", this, shm); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": createNewShm: created " + shm); + } return shm; } finally { try { @@ -227,11 +234,15 @@ public class DfsClientShmManager implements Closeable { String clientName, ExtendedBlockId blockId) throws IOException { while (true) { if (closed) { - LOG.trace("{}: the DfsClientShmManager has been closed.", this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": the DfsClientShmManager has been closed."); + } return null; } if (disabled) { - LOG.trace("{}: shared memory segment access is disabled.", this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": shared memory segment access is disabled."); + } return null; } // Try to use an existing slot. @@ -242,7 +253,9 @@ public class DfsClientShmManager implements Closeable { // There are no free slots. If someone is loading more slots, wait // for that to finish. if (loading) { - LOG.trace("{}: waiting for loading to finish...", this); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": waiting for loading to finish..."); + } finishedLoading.awaitUninterruptibly(); } else { // Otherwise, load the slot ourselves. @@ -269,9 +282,11 @@ public class DfsClientShmManager implements Closeable { // fired and marked the shm as disconnected. In this case, we // obviously don't want to add the SharedMemorySegment to our list // of valid not-full segments. 
- LOG.debug("{}: the UNIX domain socket associated with this " - + "short-circuit memory closed before we could make use of " - + "the shm.", this); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": the UNIX domain socket associated with " + + "this short-circuit memory closed before we could make " + + "use of the shm."); + } } else { notFull.put(shm.getShmId(), shm); } @@ -294,7 +309,9 @@ public class DfsClientShmManager implements Closeable { Preconditions.checkState(!full.containsKey(shm.getShmId())); Preconditions.checkState(!notFull.containsKey(shm.getShmId())); if (shm.isEmpty()) { - LOG.trace("{}: freeing empty stale {}", this, shm); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": freeing empty stale " + shm); + } shm.free(); } } else { @@ -319,8 +336,10 @@ public class DfsClientShmManager implements Closeable { // lowest ID, but it could still occur. In most workloads, // fragmentation should not be a major concern, since it doesn't impact // peak file descriptor usage or the speed of allocation. - LOG.trace("{}: shutting down UNIX domain socket for empty {}", - this, shm); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": shutting down UNIX domain socket for " + + "empty " + shm); + } shutdown(shm); } else { notFull.put(shmId, shm); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index 07f50642c48..52c1a6eeb97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -103,7 +103,9 @@ public class ShortCircuitCache implements Closeable { if (ShortCircuitCache.this.closed) return; long curMs = Time.monotonicNow(); - LOG.debug("{}: cache cleaner running at {}", this, curMs); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": cache cleaner running at " + curMs); + } int numDemoted = demoteOldEvictableMmaped(curMs); int numPurged = 0; @@ -125,9 +127,11 @@ public class ShortCircuitCache implements Closeable { numPurged++; } - LOG.debug("{}: finishing cache cleaner run started at {}. Demoted {} " - + "mmapped replicas; purged {} replicas.", - this, curMs, numDemoted, numPurged); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": finishing cache cleaner run started at " + + curMs + ". Demoted " + numDemoted + " mmapped replicas; " + + "purged " + numPurged + " replicas."); + } } finally { ShortCircuitCache.this.lock.unlock(); } @@ -182,7 +186,9 @@ public class ShortCircuitCache implements Closeable { @Override public void run() { - LOG.trace("{}: about to release {}", ShortCircuitCache.this, slot); + if (LOG.isTraceEnabled()) { + LOG.trace(ShortCircuitCache.this + ": about to release " + slot); + } final DfsClientShm shm = (DfsClientShm)slot.getShm(); final DomainSocket shmSock = shm.getPeer().getDomainSocket(); final String path = shmSock.getPath(); @@ -199,7 +205,9 @@ public class ShortCircuitCache implements Closeable { String error = resp.hasError() ? 
resp.getError() : "(unknown)"; throw new IOException(resp.getStatus().toString() + ": " + error); } - LOG.trace("{}: released {}", this, slot); + if (LOG.isTraceEnabled()) { + LOG.trace(ShortCircuitCache.this + ": released " + slot); + } success = true; } catch (IOException e) { LOG.error(ShortCircuitCache.this + ": failed to release " + @@ -425,7 +433,9 @@ public class ShortCircuitCache implements Closeable { purgeReason = "purging replica because it is stale."; } if (purgeReason != null) { - LOG.debug("{}: {}", this, purgeReason); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": " + purgeReason); + } purge(replica); } } @@ -667,8 +677,10 @@ public class ShortCircuitCache implements Closeable { ShortCircuitReplicaInfo info = null; do { if (closed) { - LOG.trace("{}: can't fethchOrCreate {} because the cache is closed.", - this, key); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": can't fetchOrCreate " + key + + " because the cache is closed."); + } return null; } Waitable waitable = replicaInfoMap.get(key); @@ -676,7 +688,9 @@ public class ShortCircuitCache implements Closeable { try { info = fetch(key, waitable); } catch (RetriableException e) { - LOG.debug("{}: retrying {}", this, e.getMessage()); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": retrying " + e.getMessage()); + } continue; } } @@ -707,7 +721,9 @@ public class ShortCircuitCache implements Closeable { // ShortCircuitReplica. So we simply wait for it to complete. ShortCircuitReplicaInfo info; try { - LOG.trace("{}: found waitable for {}", this, key); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": found waitable for " + key); + } info = waitable.await(); } catch (InterruptedException e) { LOG.info(this + ": interrupted while waiting for " + key); @@ -749,7 +765,9 @@ public class ShortCircuitCache implements Closeable { // Handle loading a new replica. ShortCircuitReplicaInfo info = null; try { - LOG.trace("{}: loading {}", this, key); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": loading " + key); + } info = creator.createShortCircuitReplicaInfo(); } catch (RuntimeException e) { LOG.warn(this + ": failed to load " + key, e); @@ -759,7 +777,9 @@ public class ShortCircuitCache implements Closeable { try { if (info.getReplica() != null) { // On success, make sure the cache cleaner thread is running. 
- LOG.trace("{}: successfully loaded {}", this, info.getReplica()); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": successfully loaded " + info.getReplica()); + } startCacheCleanerThreadIfNeeded(); // Note: new ShortCircuitReplicas start with a refCount of 2, // indicating that both this cache and whoever requested the @@ -791,8 +811,10 @@ public class ShortCircuitCache implements Closeable { cleanerExecutor.scheduleAtFixedRate(cacheCleaner, rateMs, rateMs, TimeUnit.MILLISECONDS); cacheCleaner.setFuture(future); - LOG.debug("{}: starting cache cleaner thread which will run every {} ms", - this, rateMs); + if (LOG.isDebugEnabled()) { + LOG.debug(this + ": starting cache cleaner thread which will run " + + "every " + rateMs + " ms"); + } } } @@ -810,12 +832,17 @@ public class ShortCircuitCache implements Closeable { long lastAttemptTimeMs = (Long)replica.mmapData; long delta = Time.monotonicNow() - lastAttemptTimeMs; if (delta < mmapRetryTimeoutMs) { - LOG.trace("{}: can't create client mmap for {} because we failed to" - + " create one just {}ms ago.", this, replica, delta); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": can't create client mmap for " + + replica + " because we failed to " + + "create one just " + delta + "ms ago."); + } return null; } - LOG.trace("{}: retrying client mmap for {}, {} ms after the previous " - + "failure.", this, replica, delta); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": retrying client mmap for " + replica + + ", " + delta + " ms after the previous failure."); + } } else if (replica.mmapData instanceof Condition) { Condition cond = (Condition)replica.mmapData; cond.awaitUninterruptibly(); @@ -938,10 +965,38 @@ public class ShortCircuitCache implements Closeable { } } } - LOG.debug("visiting {} with outstandingMmapCount={}, replicas={}, " - + "failedLoads={}, evictable={}, evictableMmapped={}", - visitor.getClass().getName(), outstandingMmapCount, replicas, - failedLoads, evictable, evictableMmapped); + if (LOG.isDebugEnabled()) { + StringBuilder builder = new StringBuilder(); + builder.append("visiting ").append(visitor.getClass().getName()). + append("with outstandingMmapCount=").append(outstandingMmapCount). + append(", replicas="); + String prefix = ""; + for (Entry entry : replicas.entrySet()) { + builder.append(prefix).append(entry.getValue()); + prefix = ","; + } + prefix = ""; + builder.append(", failedLoads="); + for (Entry entry : failedLoads.entrySet()) { + builder.append(prefix).append(entry.getValue()); + prefix = ","; + } + prefix = ""; + builder.append(", evictable="); + for (Entry entry : evictable.entrySet()) { + builder.append(prefix).append(entry.getKey()). + append(":").append(entry.getValue()); + prefix = ","; + } + prefix = ""; + builder.append(", evictableMmapped="); + for (Entry entry : evictableMmapped.entrySet()) { + builder.append(prefix).append(entry.getKey()). 
+ append(":").append(entry.getValue()); + prefix = ","; + } + LOG.debug(builder.toString()); + } visitor.visit(outstandingMmapCount, replicas, failedLoads, evictable, evictableMmapped); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java index 38cf22b61e4..37566e2ab60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java @@ -154,19 +154,25 @@ public class ShortCircuitReplica { // Check staleness by looking at the shared memory area we use to // communicate with the DataNode. boolean stale = !slot.isValid(); - LOG.trace("{}: checked shared memory segment. isStale={}", this, stale); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": checked shared memory segment. isStale=" + stale); + } return stale; } else { // Fall back to old, time-based staleness method. long deltaMs = Time.monotonicNow() - creationTimeMs; long staleThresholdMs = cache.getStaleThresholdMs(); if (deltaMs > staleThresholdMs) { - LOG.trace("{} is stale because it's {} ms old and staleThreadholdMS={}", - this, deltaMs, staleThresholdMs); + if (LOG.isTraceEnabled()) { + LOG.trace(this + " is stale because it's " + deltaMs + + " ms old, and staleThresholdMs = " + staleThresholdMs); + } return true; } else { - LOG.trace("{} is not stale because it's only {} ms old " - + "and staleThresholdMs={}", this, deltaMs, staleThresholdMs); + if (LOG.isTraceEnabled()) { + LOG.trace(this + " is not stale because it's only " + deltaMs + + " ms old, and staleThresholdMs = " + staleThresholdMs); + } return false; } } @@ -188,8 +194,13 @@ public class ShortCircuitReplica { return false; } boolean result = slot.addAnchor(); - LOG.trace("{}: {} no-checksum anchor to slot {}", - this, result ? 
"added" : "could not add", slot); + if (LOG.isTraceEnabled()) { + if (result) { + LOG.trace(this + ": added no-checksum anchor to slot " + slot); + } else { + LOG.trace(this + ": could not add no-checksum anchor to slot " + slot); + } + } return result; } @@ -252,7 +263,9 @@ public class ShortCircuitReplica { suffix += " scheduling " + slot + " for later release."; } } - LOG.trace("closed {}{}", this, suffix); + if (LOG.isTraceEnabled()) { + LOG.trace("closed " + this + suffix); + } } public FileInputStream getDataStream() { @@ -280,7 +293,9 @@ public class ShortCircuitReplica { FileChannel channel = dataStream.getChannel(); MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0, Math.min(Integer.MAX_VALUE, channel.size())); - LOG.trace("{}: created mmap of size {}", this, channel.size()); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": created mmap of size " + channel.size()); + } return mmap; } catch (IOException e) { LOG.warn(this + ": mmap error", e); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index fa40c153c27..78325a389b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -484,9 +484,13 @@ public class ShortCircuitShm { POSIX.MMAP_PROT_READ | POSIX.MMAP_PROT_WRITE, true, mmappedLength); this.slots = new Slot[mmappedLength / BYTES_PER_SLOT]; this.allocatedSlots = new BitSet(slots.length); - LOG.trace("creating {}(shmId={}, mmappedLength={}, baseAddress={}, " - + "slots.length={})", this.getClass().getSimpleName(), shmId, - mmappedLength, String.format("%x", baseAddress), slots.length); + if (LOG.isTraceEnabled()) { + LOG.trace("creating " + this.getClass().getSimpleName() + + "(shmId=" + shmId + + ", mmappedLength=" + mmappedLength + + ", baseAddress=" + String.format("%x", baseAddress) + + ", slots.length=" + slots.length + ")"); + } } public final ShmId getShmId() { @@ -611,7 +615,9 @@ public class ShortCircuitShm { "tried to unregister slot " + slotIdx + ", which was not registered."); allocatedSlots.set(slotIdx, false); slots[slotIdx] = null; - LOG.trace("{}: unregisterSlot {}", this, slotIdx); + if (LOG.isTraceEnabled()) { + LOG.trace(this + ": unregisterSlot " + slotIdx); + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java index e361252dd2b..a9adb7e2f71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java @@ -36,6 +36,18 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public abstract class ByteArrayManager { static final Logger LOG = LoggerFactory.getLogger(ByteArrayManager.class); + private static final ThreadLocal DEBUG_MESSAGE = + new ThreadLocal() { + protected StringBuilder initialValue() { + return new StringBuilder(); + } + }; + + private static void logDebugMessage() { + final StringBuilder b = DEBUG_MESSAGE.get(); + LOG.debug(b.toString()); + b.setLength(0); + } static final int MIN_ARRAY_LENGTH = 32; static final byte[] 
EMPTY_BYTE_ARRAY = {}; @@ -148,18 +160,27 @@ public abstract class ByteArrayManager { * via the {@link FixedLengthManager#recycle(byte[])} method. */ synchronized byte[] allocate() throws InterruptedException { - LOG.debug(", {}", this); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(", ").append(this); + } for(; numAllocated >= maxAllocated;) { - LOG.debug(": wait ..."); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(": wait ..."); + logDebugMessage(); + } wait(); - LOG.debug("wake up: {}", this); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append("wake up: ").append(this); + } } numAllocated++; final byte[] array = freeQueue.poll(); - LOG.debug(", recycled? {}", array != null); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(", recycled? ").append(array != null); + } return array != null? array : new byte[byteArrayLength]; } @@ -173,7 +194,9 @@ public abstract class ByteArrayManager { synchronized int recycle(byte[] array) { Preconditions.checkNotNull(array); Preconditions.checkArgument(array.length == byteArrayLength); - LOG.debug(", {}", this); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(", ").append(this); + } notify(); numAllocated--; @@ -184,7 +207,9 @@ public abstract class ByteArrayManager { } if (freeQueue.size() < maxAllocated - numAllocated) { - LOG.debug(", freeQueue.offer"); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(", freeQueue.offer"); + } freeQueue.offer(array); } return freeQueue.size(); @@ -324,7 +349,9 @@ public abstract class ByteArrayManager { public byte[] newByteArray(final int arrayLength) throws InterruptedException { Preconditions.checkArgument(arrayLength >= 0); - LOG.debug("allocate({})", arrayLength); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append("allocate(").append(arrayLength).append(")"); + } final byte[] array; if (arrayLength == 0) { @@ -338,12 +365,18 @@ public abstract class ByteArrayManager { final FixedLengthManager manager = managers.get(powerOfTwo, aboveThreshold); - LOG.debug(": count={}, {}Threshold", count, - aboveThreshold ? "above" : "below"); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(": count=").append(count) + .append(aboveThreshold? ", aboveThreshold": ", belowThreshold"); + } array = manager != null? manager.allocate(): new byte[powerOfTwo]; } - LOG.debug(", return byte[{}]", array.length); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(", return byte[") + .append(array.length).append("]"); + logDebugMessage(); + } return array; } @@ -358,7 +391,10 @@ public abstract class ByteArrayManager { @Override public int release(final byte[] array) { Preconditions.checkNotNull(array); - LOG.debug("recycle: array.length={}", array.length); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get() + .append("recycle: array.length=").append(array.length); + } final int freeQueueSize; if (array.length == 0) { @@ -368,7 +404,10 @@ public abstract class ByteArrayManager { freeQueueSize = manager == null? 
-1: manager.recycle(array); } - LOG.debug(", freeQueueSize={}", freeQueueSize); + if (LOG.isDebugEnabled()) { + DEBUG_MESSAGE.get().append(", freeQueueSize=").append(freeQueueSize); + logDebugMessage(); + } return freeQueueSize; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java index 870103eddf0..a864d37bbea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java @@ -134,7 +134,9 @@ final class TokenAspect { if (token != null) { fs.setDelegationToken(token); addRenewAction(fs); - LOG.debug("Created new DT for {}", token.getService()); + if (LOG.isDebugEnabled()) { + LOG.debug("Created new DT for {}", token.getService()); + } } hasInitedToken = true; } @@ -147,7 +149,9 @@ final class TokenAspect { synchronized void initDelegationToken(UserGroupInformation ugi) { Token token = selectDelegationToken(ugi); if (token != null) { - LOG.debug("Found existing DT for {}", token.getService()); + if (LOG.isDebugEnabled()) { + LOG.debug("Found existing DT for {}", token.getService()); + } fs.setDelegationToken(token); hasInitedToken = true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java index be5f17dc3c6..4c2324152af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java @@ -182,7 +182,9 @@ public class URLConnectionFactory { public URLConnection openConnection(URL url, boolean isSpnego) throws IOException, AuthenticationException { if (isSpnego) { - LOG.debug("open AuthenticatedURL connection {}", url); + if (LOG.isDebugEnabled()) { + LOG.debug("open AuthenticatedURL connection {}", url); + } UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab(); final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token(); return new AuthenticatedURL(new KerberosUgiAuthenticator(), diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index e12274861f3..e245d2ace40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -233,12 +233,16 @@ public class WebHdfsFileSystem extends FileSystem // refetch tokens. 
even if ugi has credentials, don't attempt // to get another token to match hdfs/rpc behavior if (token != null) { - LOG.debug("Using UGI token: {}", token); + if(LOG.isDebugEnabled()) { + LOG.debug("Using UGI token: {}", token); + } canRefreshDelegationToken = false; } else { token = getDelegationToken(null); if (token != null) { - LOG.debug("Fetched new token: {}", token); + if(LOG.isDebugEnabled()) { + LOG.debug("Fetched new token: {}", token); + } } else { // security is disabled canRefreshDelegationToken = false; } @@ -253,7 +257,9 @@ public class WebHdfsFileSystem extends FileSystem boolean replaced = false; if (canRefreshDelegationToken) { Token token = getDelegationToken(null); - LOG.debug("Replaced expired token: {}", token); + if(LOG.isDebugEnabled()) { + LOG.debug("Replaced expired token: {}", token); + } setDelegationToken(token); replaced = (token != null); } @@ -436,7 +442,9 @@ public class WebHdfsFileSystem extends FileSystem InetSocketAddress nnAddr = getCurrentNNAddr(); final URL url = new URL(getTransportScheme(), nnAddr.getHostName(), nnAddr.getPort(), path + '?' + query); - LOG.trace("url={}", url); + if (LOG.isTraceEnabled()) { + LOG.trace("url={}", url); + } return url; } @@ -471,7 +479,9 @@ public class WebHdfsFileSystem extends FileSystem + Param.toSortedString("&", getAuthParameters(op)) + Param.toSortedString("&", parameters); final URL url = getNamenodeURL(path, query); - LOG.trace("url={}", url); + if (LOG.isTraceEnabled()) { + LOG.trace("url={}", url); + } return url; } @@ -759,7 +769,9 @@ public class WebHdfsFileSystem extends FileSystem } catch (Exception e) { // catch json parser errors final IOException ioe = new IOException("Response decoding failure: "+e.toString(), e); - LOG.debug("Response decoding failure.", e); + if (LOG.isDebugEnabled()) { + LOG.debug("Response decoding failure: {}", e.toString(), e); + } throw ioe; } finally { conn.disconnect(); @@ -1230,7 +1242,9 @@ public class WebHdfsFileSystem extends FileSystem cancelDelegationToken(delegationToken); } } catch (IOException ioe) { - LOG.debug("Token cancel failed: ", ioe); + if (LOG.isDebugEnabled()) { + LOG.debug("Token cancel failed: ", ioe); + } } finally { super.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index dfd0b575d85..7b62b97500a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1000,9 +1000,6 @@ Release 2.8.0 - UNRELEASED HDFS-8696. Make the lower and higher watermark in the DN Netty server configurable. (Xiaobing Zhou via wheat9) - HDFS-8971. Remove guards when calling LOG.debug() and LOG.trace() in client - package. (Mingliang Liu via wheat9) - OPTIMIZATIONS HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
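
Note on the pattern being reverted: nearly every hunk above swaps the same two SLF4J
idioms. The sketch below contrasts them in isolation; it is illustrative only (the
class name, method, and message text are placeholders, not HDFS code).

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative only: the two logging styles this revert toggles between.
public class GuardedLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLoggingExample.class);

  void demo(Object key, long deltaMs) {
    // Style restored by the revert: an explicit level check guarding a
    // concatenated message, so the String is only built when TRACE is enabled.
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": loading " + key + ", " + deltaMs
          + " ms after the previous failure.");
    }

    // Style being removed by the revert (introduced in HDFS-8971):
    // parameterized logging, which defers message formatting to the logger,
    // though the arguments themselves are still evaluated at the call site.
    LOG.trace("{}: loading {}, {} ms after the previous failure.",
        this, key, deltaMs);
  }
}

The emitted log output is the same either way; the difference is only in when (and
whether) the message string gets assembled.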