diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
index 2c0f9f2c7fd..a9c1fa4a423 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
@@ -59,7 +59,7 @@ public class RecoveredReplicationSourceShipper extends ReplicationSourceShipper
         source.locateRecoveredPaths(walGroupId);
         break;
       } catch (IOException e) {
-        LOG.error("Error while locating recovered queue paths, attempt #" + numRetries);
+        LOG.error("Error while locating recovered queue paths, attempt #" + numRetries, e);
         numRetries++;
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 9baf95480df..d1268fab94c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -645,8 +645,8 @@ public class ReplicationSource implements ReplicationSourceInterface {
     if (cause == null) {
       LOG.info("{} Closing source {} because: {}", logPeerId(), this.queueId, reason);
     } else {
-      LOG.error("{} Closing source {} because an error occurred: {}",
-        logPeerId(), this.queueId, reason, cause);
+      LOG.error(String.format("%s Closing source %s because an error occurred: %s",
+        logPeerId(), this.queueId, reason), cause);
     }
     this.sourceRunning = false;
     if (initThread != null && Thread.currentThread() != initThread) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index a276b7815e6..38dc214ba5a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -76,7 +76,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -925,8 +924,8 @@ public class ReplicationSourceManager implements ReplicationListener {
       LOG.error(String.format("ReplicationException: cannot claim dead region (%s)'s " +
         "replication queue. Znode : (%s)" +
         " Possible solution: check if znode size exceeds jute.maxBuffer value. " +
-        " If so, increase it for both client and server side." + e), deadRS,
-        queueStorage.getRsNode(deadRS));
+        " If so, increase it for both client and server side.",
+        deadRS, queueStorage.getRsNode(deadRS)), e);
       server.abort("Failed to claim queue from dead regionserver.", e);
       return;
     }
@@ -1202,7 +1201,7 @@ public class ReplicationSourceManager implements ReplicationListener {
    * This ReplicationSource is NOT created via {@link ReplicationSourceFactory}.
    * @see #addSource(String) This is a specialization of the addSource call.
    * @see #catalogReplicationSource for a note on this ReplicationSource's lifecycle (and more on
-   *      why the special handling).
+   *   why the special handling).
    */
   private ReplicationSourceInterface createCatalogReplicationSource(RegionInfo regionInfo)
     throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index 57f4cba5c06..3631cad2962 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
@@ -172,7 +172,7 @@ class ReplicationSourceWALReader extends Thread {
         }
       } catch (IOException e) {
         if (sleepMultiplier < maxRetriesMultiplier) {
-          LOG.debug("Failed to read stream of replication entries: " + e);
+          LOG.debug("Failed to read stream of replication entries: ", e);
           sleepMultiplier++;
         } else {
           LOG.error("Failed to read stream of replication entries", e);
@@ -306,7 +306,7 @@ class ReplicationSourceWALReader extends Thread {
         return true;
       }
     } catch (IOException ioe) {
-      LOG.warn("Couldn't get file length information about log {}", queue.peek());
+      LOG.warn("Couldn't get file length information about log " + queue.peek(), ioe);
     }
   }
   return false;
@@ -420,7 +420,7 @@ class ReplicationSourceWALReader extends Thread {
       }
     } catch (IOException e) {
       LOG.error("Failed to deserialize bulk load entry from wal edit. "
-        + "Then its hfiles count will not be added into metric.");
+        + "Then its hfiles count will not be added into metric.", e);
     }
   }
 
@@ -457,8 +457,7 @@ class ReplicationSourceWALReader extends Thread {
     } catch (IOException e) {
       LOG.error("Failed to deserialize bulk load entry from wal edit. "
         + "Size of HFiles part of cell will not be considered in replication "
-        + "request size calculation.",
-        e);
+        + "request size calculation.", e);
     }
   }
 }