HDFS-1977. Stop using StringUtils.stringifyException(). Contributed by Bharath Mundlapudi.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1145834 13f79535-47bb-0310-9956-ffa450edef68
Jitendra Nath Pandey 2011-07-13 01:11:01 +00:00
parent faf8747e3e
commit 2c5dd549e3
18 changed files with 57 additions and 85 deletions
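
The patch applies one idiom throughout: instead of flattening the stack trace into the log message with StringUtils.stringifyException(), the Throwable is passed to the logger as a second argument so the logging framework renders the trace itself. Below is a minimal, hypothetical before/after sketch of that idiom (not code taken from the patch); it assumes the commons-logging Log/LogFactory API that the HDFS LOG fields are built on.

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LoggingPatternSketch {
  private static final Log LOG = LogFactory.getLog(LoggingPatternSketch.class);

  public static void main(String[] args) {
    try {
      throw new IOException("simulated failure");
    } catch (IOException e) {
      // Before: the stack trace is stringified and concatenated into the message.
      //   LOG.warn("Problem getting block size: " +
      //       StringUtils.stringifyException(e));

      // After: the Throwable is passed directly; the framework prints the trace.
      LOG.warn("Problem getting block size", e);
    }
  }
}

Passing the Throwable also keeps the exception object available to log appenders and defers formatting of the stack trace until the message is actually logged.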

View File

@@ -546,6 +546,9 @@ Trunk (unreleased changes)
     HDFS-2134. Move DecommissionManager to the blockmanagement package.
     (szetszwo)

+    HDFS-1977. Stop using StringUtils.stringifyException().
+    (Bharath Mundlapudi via jitendra)
+
   OPTIMIZATIONS

     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

View File

@@ -101,7 +101,6 @@
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;

 /********************************************************
  * DFSClient can connect to a Hadoop Filesystem and
@@ -489,8 +488,7 @@ public long getBlockSize(String f) throws IOException {
     try {
       return namenode.getPreferredBlockSize(f);
     } catch (IOException ie) {
-      LOG.warn("Problem getting block size: " +
-          StringUtils.stringifyException(ie));
+      LOG.warn("Problem getting block size", ie);
       throw ie;
     }
   }
@@ -1578,9 +1576,8 @@ void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
     try {
       reportBadBlocks(lblocks);
     } catch (IOException ie) {
-      LOG.info("Found corruption while reading " + file
-          + ". Error repairing corrupt blocks. Bad blocks remain. "
-          + StringUtils.stringifyException(ie));
+      LOG.info("Found corruption while reading " + file
+          + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
     }
   }

View File

@@ -47,7 +47,6 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.StringUtils;

 /****************************************************************
  * DFSInputStream provides bytes from a named file. It handles
@@ -499,7 +498,7 @@ private synchronized int readBuffer(byte buf[], int off, int len,
       if (!retryCurrentNode) {
         DFSClient.LOG.warn("Exception while reading from "
             + getCurrentBlock() + " of " + src + " from "
-            + currentNode + ": " + StringUtils.stringifyException(e));
+            + currentNode, e);
       }
       ioe = e;
     }
@@ -557,7 +556,7 @@ public synchronized int read(byte buf[], int off, int len) throws IOException {
         throw ce;
       } catch (IOException e) {
         if (retries == 1) {
-          DFSClient.LOG.warn("DFS Read: " + StringUtils.stringifyException(e));
+          DFSClient.LOG.warn("DFS Read", e);
         }
         blockEnd = -1;
         if (currentNode != null) { addToDeadNodes(currentNode); }
@@ -931,9 +930,8 @@ public synchronized void seek(long targetPos) throws IOException {
       } catch (IOException e) {//make following read to retry
         if(DFSClient.LOG.isDebugEnabled()) {
           DFSClient.LOG.debug("Exception while seek to " + targetPos
-              + " from " + getCurrentBlock() + " of " + src
-              + " from " + currentNode + ": "
-              + StringUtils.stringifyException(e));
+              + " from " + getCurrentBlock() + " of " + src + " from "
+              + currentNode, e);
         }
       }
     }

View File

@@ -75,8 +75,6 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.StringUtils;

 /****************************************************************
@@ -548,8 +546,7 @@ public void run() {
           Thread.sleep(artificialSlowdown);
         }
       } catch (Throwable e) {
-        DFSClient.LOG.warn("DataStreamer Exception: " +
-            StringUtils.stringifyException(e));
+        DFSClient.LOG.warn("DataStreamer Exception", e);
         if (e instanceof IOException) {
           setLastException((IOException)e);
         }
@@ -698,9 +695,8 @@ public void run() {
         synchronized (dataQueue) {
           dataQueue.notifyAll();
         }
-        DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception " +
-            " for block " + block +
-            StringUtils.stringifyException(e));
+        DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception "
+            + " for block " + block, e);
         responderClosed = true;
       }
     }
@@ -1101,7 +1097,7 @@ private LocatedBlock locateFollowingBlock(long start,
           throw e;
         } else {
           --retries;
-          DFSClient.LOG.info(StringUtils.stringifyException(e));
+          DFSClient.LOG.info("Exception while adding a block", e);
           if (System.currentTimeMillis() - localstart > 5000) {
             DFSClient.LOG.info("Waiting for replication for "
                 + (System.currentTimeMillis() - localstart) / 1000

View File

@@ -729,7 +729,7 @@ private void dispatchBlocks() {
           blocksToReceive -= getBlockList();
           continue;
         } catch (IOException e) {
-          LOG.warn(StringUtils.stringifyException(e));
+          LOG.warn("Exception while getting block list", e);
           return;
         }
       }
@@ -1553,7 +1553,7 @@ public static void main(String[] args) {
     try {
       System.exit(ToolRunner.run(null, new Cli(), args));
     } catch (Throwable e) {
-      LOG.error(StringUtils.stringifyException(e));
+      LOG.error("Exiting balancer due an exception", e);
       System.exit(-1);
     }
   }

View File

@@ -52,7 +52,6 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.StringUtils;

 /**
  * The class provides utilities for {@link Balancer} to access a NameNode
@@ -222,7 +221,7 @@ public void run() {
       try {
         blockTokenSecretManager.setKeys(namenode.getBlockKeys());
       } catch (Exception e) {
-        LOG.error(StringUtils.stringifyException(e));
+        LOG.error("Failed to set keys", e);
       }
       try {
         Thread.sleep(keyUpdaterInterval);

View File

@@ -49,7 +49,6 @@
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.StringUtils;

 /**
  * Performs two types of scanning:
@@ -434,9 +433,8 @@ private void verifyBlock(ExtendedBlock block) {
         return;
       }

-      LOG.warn((second ? "Second " : "First ") +
-          "Verification failed for " + block + ". Exception : " +
-          StringUtils.stringifyException(e));
+      LOG.warn((second ? "Second " : "First ") + "Verification failed for "
+          + block, e);

       if (second) {
         totalScanErrors++;
@@ -512,8 +510,7 @@ private boolean assignInitialVerificationTimes() {
           logReader[1] = log.getPreviousFileReader();
         }
       } catch (IOException e) {
-        LOG.warn("Could not read previous verification times : " +
-            StringUtils.stringifyException(e));
+        LOG.warn("Could not read previous verification times", e);
       }

       try {
@@ -645,8 +642,7 @@ public void scan() {
         }
       }
     } catch (RuntimeException e) {
-      LOG.warn("RuntimeException during BlockPoolScanner.scan() : " +
-          StringUtils.stringifyException(e));
+      LOG.warn("RuntimeException during BlockPoolScanner.scan()", e);
       throw e;
     } finally {
       cleanUp();
@@ -910,8 +906,7 @@ public String next() {
       try {
         readNext();
       } catch (IOException e) {
-        LOG.info("Could not reade next line in LogHandler : " +
-            StringUtils.stringifyException(e));
+        LOG.info("Could not read next line in LogHandler", e);
       }
       return curLine;
     }

View File

@@ -49,7 +49,6 @@
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.PureJavaCrc32;
-import org.apache.hadoop.util.StringUtils;

 /** A class that receives a block and writes to its own disk, meanwhile
  * may copies it to another site. If a throttler is provided,
@@ -276,9 +275,8 @@ void flush() throws IOException {
    */
   private void handleMirrorOutError(IOException ioe) throws IOException {
     String bpid = block.getBlockPoolId();
-    LOG.info(datanode.getDNRegistrationForBP(bpid) + ":Exception writing block " +
-        block + " to mirror " + mirrorAddr + "\n" +
-        StringUtils.stringifyException(ioe));
+    LOG.info(datanode.getDNRegistrationForBP(bpid)
+        + ":Exception writing block " + block + " to mirror " + mirrorAddr, ioe);
     if (Thread.interrupted()) { // shut down if the thread is interrupted
       throw ioe;
     } else { // encounter an error while writing to mirror

View File

@@ -38,7 +38,6 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.StringUtils;

 /**
  * Reads a block from the disk and sends it to a recipient.
@@ -328,9 +327,8 @@ private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out)
       try {
         checksumIn.readFully(buf, checksumOff, checksumLen);
       } catch (IOException e) {
-        LOG.warn(" Could not read or failed to veirfy checksum for data" +
-            " at offset " + offset + " for block " + block + " got : "
-            + StringUtils.stringifyException(e));
+        LOG.warn(" Could not read or failed to veirfy checksum for data"
+            + " at offset " + offset + " for block " + block, e);
         IOUtils.closeStream(checksumIn);
         checksumIn = null;
         if (corruptChecksumOk) {

View File

@@ -861,8 +861,8 @@ private void reportBadBlocks(ExtendedBlock block) {
       /* One common reason is that NameNode could be in safe mode.
        * Should we keep on retrying in that case?
        */
-      LOG.warn("Failed to report bad block " + block + " to namenode : " +
-          " Exception : " + StringUtils.stringifyException(e));
+      LOG.warn("Failed to report bad block " + block + " to namenode : "
+          + " Exception", e);
     }
   }
@@ -1111,12 +1111,11 @@ private void offerService() throws Exception {
           if (UnregisteredNodeException.class.getName().equals(reClass) ||
               DisallowedDatanodeException.class.getName().equals(reClass) ||
               IncorrectVersionException.class.getName().equals(reClass)) {
-            LOG.warn("blockpool " + blockPoolId + " is shutting down: " +
-                StringUtils.stringifyException(re));
+            LOG.warn("blockpool " + blockPoolId + " is shutting down", re);
             shouldServiceRun = false;
             return;
           }
-          LOG.warn(StringUtils.stringifyException(re));
+          LOG.warn("RemoteException in offerService", re);
           try {
             long sleepTime = Math.min(1000, heartBeatInterval);
             Thread.sleep(sleepTime);
@@ -1124,7 +1123,7 @@ private void offerService() throws Exception {
             Thread.currentThread().interrupt();
           }
         } catch (IOException e) {
-          LOG.warn(StringUtils.stringifyException(e));
+          LOG.warn("IOException in offerService", e);
         }
       } // while (shouldRun && shouldServiceRun)
     } // offerService
@@ -1241,18 +1240,18 @@ public void run() {
             startDistributedUpgradeIfNeeded();
             offerService();
           } catch (Exception ex) {
-            LOG.error("Exception: " + StringUtils.stringifyException(ex));
+            LOG.error("Exception in BPOfferService", ex);
             if (shouldRun && shouldServiceRun) {
               try {
                 Thread.sleep(5000);
               } catch (InterruptedException ie) {
-                LOG.warn("Received exception: ", ie);
+                LOG.warn("Received exception", ie);
               }
             }
           }
         }
       } catch (Throwable ex) {
-        LOG.warn("Unexpected exception ", ex);
+        LOG.warn("Unexpected exception", ex);
       } finally {
         LOG.warn(bpRegistration + " ending block pool service for: "
             + blockPoolId);
@@ -1737,8 +1736,7 @@ private void handleDiskError(String errMsgr) {
       try {
         nn.errorReport(bpos.bpRegistration, dpError, errMsgr);
       } catch(IOException e) {
-        LOG.warn("Error reporting disk failure to NameNode: " +
-            StringUtils.stringifyException(e));
+        LOG.warn("Error reporting disk failure to NameNode", e);
       }
     }
@@ -2007,8 +2005,9 @@ public void run() {
           }
         }
       } catch (IOException ie) {
-        LOG.warn(bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
-            + " got " + StringUtils.stringifyException(ie));
+        LOG.warn(
+            bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
+                + " got ", ie);
         // check if there are any disk problem
         checkDiskError();
@@ -2279,7 +2278,7 @@ public static void secureMain(String args[], SecureResources resources) {
       if (datanode != null)
         datanode.join();
     } catch (Throwable e) {
-      LOG.error(StringUtils.stringifyException(e));
+      LOG.error("Exception in secureMain", e);
       System.exit(-1);
     } finally {
       // We need to add System.exit here because either shutdown was called or

View File

@@ -173,8 +173,8 @@ synchronized void recoverTransitionRead(DataNode datanode,
         }
       } catch (IOException ioe) {
         sd.unlock();
-        LOG.warn("Ignoring storage directory "+ dataDir
-            + " due to an exception: " + StringUtils.stringifyException(ioe));
+        LOG.warn("Ignoring storage directory " + dataDir
+            + " due to an exception", ioe);
         //continue with other good dirs
         continue;
       }

View File

@@ -62,7 +62,6 @@
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.StringUtils;

 import com.google.protobuf.ByteString;
@@ -268,10 +267,8 @@ public void readBlock(final ExtendedBlock block,
       /* What exactly should we do here?
        * Earlier version shutdown() datanode if there is disk error.
       */
-      LOG.warn(dnR + ":Got exception while serving " +
-          block + " to " +
-          remoteAddress + ":\n" +
-          StringUtils.stringifyException(ioe) );
+      LOG.warn(dnR + ":Got exception while serving " + block + " to "
+          + remoteAddress, ioe);
       throw ioe;
     } finally {
       IOUtils.closeStream(blockSender);
@@ -424,8 +421,7 @@ public void writeBlock(final ExtendedBlock block,
           } else {
             LOG.info(datanode + ":Exception transfering block " +
                      block + " to mirror " + mirrorNode +
-                     ". continuing without the mirror.\n" +
-                     StringUtils.stringifyException(e));
+                     ". continuing without the mirror.", e);
           }
         }
       }

View File

@@ -34,7 +34,6 @@
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.StringUtils;

 /**
@@ -157,7 +156,7 @@ public void run() {
         ss.close();
       } catch (IOException ie) {
         LOG.warn(datanode.getMachineName()
-            + ":DataXceiverServer: Close exception due to: ", ie);
+            + " :DataXceiverServer: close exception", ie);
       }
     }
@@ -167,8 +166,7 @@ void kill() {
     try {
       this.ss.close();
     } catch (IOException ie) {
-      LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): "
-          + StringUtils.stringifyException(ie));
+      LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie);
     }

     // close all the sockets that were accepted earlier

View File

@@ -62,7 +62,6 @@
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -2377,8 +2376,7 @@ public void checkAndUpdate(String bpid, long blockId, File diskFile,
       try {
         datanode.reportBadBlocks(new ExtendedBlock(bpid, corruptBlock));
       } catch (IOException e) {
-        DataNode.LOG.warn("Failed to repot bad block " + corruptBlock
-            + "Exception:" + StringUtils.stringifyException(e));
+        DataNode.LOG.warn("Failed to repot bad block " + corruptBlock, e);
       }
     }
   }
} }

View File

@@ -24,7 +24,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.util.StringUtils;

 import java.io.IOException;
 import java.net.SocketTimeoutException;
@@ -110,7 +109,7 @@ public void run() {
       try {
         doUpgrade();
       } catch(Exception e) {
-        DataNode.LOG.error(StringUtils.stringifyException(e));
+        DataNode.LOG.error("Exception in doUpgrade", e);
       }
       break;
     }
@@ -129,7 +128,7 @@ public void run() {
       if(upgradeManager != null)
         upgradeManager.completeUpgrade();
     } catch(IOException e) {
-      DataNode.LOG.error(StringUtils.stringifyException(e));
+      DataNode.LOG.error("Exception in completeUpgrade", e);
     }
   }

View File

@@ -3216,7 +3216,7 @@ public void run() {
           lastBlockKeyUpdate = now;
         }
       } catch (Exception e) {
-        FSNamesystem.LOG.error(StringUtils.stringifyException(e));
+        FSNamesystem.LOG.error("Exception while checking heartbeat", e);
       }
       try {
         Thread.sleep(5000); // 5 seconds
@@ -4294,7 +4294,7 @@ synchronized void leave(boolean checkForUpgrades) {
       try {
         needUpgrade = startDistributedUpgradeIfNeeded();
       } catch(IOException e) {
-        FSNamesystem.LOG.error(StringUtils.stringifyException(e));
+        FSNamesystem.LOG.error("IOException in startDistributedUpgradeIfNeeded", e);
       }
       if(needUpgrade) {
         // switch to manual safe mode

View File

@@ -691,7 +691,7 @@ public void stop() {
     try {
       if (httpServer != null) httpServer.stop();
     } catch (Exception e) {
-      LOG.error(StringUtils.stringifyException(e));
+      LOG.error("Exception while stopping httpserver", e);
     }
     if(namesystem != null) namesystem.close();
     if(emptier != null) emptier.interrupt();
@@ -1702,7 +1702,7 @@ public static void main(String argv[]) throws Exception {
       if (namenode != null)
         namenode.join();
     } catch (Throwable e) {
-      LOG.error(StringUtils.stringifyException(e));
+      LOG.error("Exception in namenode join", e);
       System.exit(-1);
     }
   }

View File

@@ -254,7 +254,7 @@ public void shutdown() {
     try {
       if (checkpointImage != null) checkpointImage.close();
     } catch(IOException e) {
-      LOG.warn(StringUtils.stringifyException(e));
+      LOG.warn("Exception while closing CheckpointStorage", e);
     }
   }
@@ -264,7 +264,7 @@ public void run() {
     try {
       ugi = UserGroupInformation.getLoginUser();
     } catch (IOException e) {
-      LOG.error(StringUtils.stringifyException(e));
+      LOG.error("Exception while getting login user", e);
       e.printStackTrace();
       Runtime.getRuntime().exit(-1);
     }
@@ -316,13 +316,11 @@ public void doWork() {
           lastCheckpointTime = now;
         }
       } catch (IOException e) {
-        LOG.error("Exception in doCheckpoint: ");
-        LOG.error(StringUtils.stringifyException(e));
+        LOG.error("Exception in doCheckpoint", e);
         e.printStackTrace();
         checkpointImage.getStorage().imageDigest = null;
       } catch (Throwable e) {
-        LOG.error("Throwable Exception in doCheckpoint: ");
-        LOG.error(StringUtils.stringifyException(e));
+        LOG.error("Throwable Exception in doCheckpoint", e);
         e.printStackTrace();
         Runtime.getRuntime().exit(-1);
       }