HBASE-4436 Remove and convert @deprecated RemoteExceptionHandler.decodeRemoteException calls (Talat Uyarer)

This commit is contained in:
stack 2014-07-28 18:16:43 -07:00
parent afae1e2583
commit 6a74ef1542
14 changed files with 69 additions and 171 deletions

RemoteExceptionHandler.java

@@ -1,120 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ipc.RemoteException;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-
-/**
- * An immutable class which contains a static method for handling
- * org.apache.hadoop.ipc.RemoteException exceptions.
- */
-@InterfaceAudience.Private
-public class RemoteExceptionHandler {
-  /* Not instantiable */
-  private RemoteExceptionHandler() {super();}
-
-  /**
-   * Examine passed Throwable. See if its carrying a RemoteException. If so,
-   * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise,
-   * pass back <code>t</code> unaltered.
-   * @param t Throwable to examine.
-   * @return Decoded RemoteException carried by <code>t</code> or
-   * <code>t</code> unaltered.
-   */
-  public static Throwable checkThrowable(final Throwable t) {
-    Throwable result = t;
-    if (t instanceof RemoteException) {
-      try {
-        result =
-          RemoteExceptionHandler.decodeRemoteException((RemoteException)t);
-      } catch (Throwable tt) {
-        result = tt;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Examine passed IOException. See if its carrying a RemoteException. If so,
-   * run {@link #decodeRemoteException(RemoteException)} on it. Otherwise,
-   * pass back <code>e</code> unaltered.
-   * @param e Exception to examine.
-   * @return Decoded RemoteException carried by <code>e</code> or
-   * <code>e</code> unaltered.
-   */
-  public static IOException checkIOException(final IOException e) {
-    Throwable t = checkThrowable(e);
-    return t instanceof IOException? (IOException)t: new IOException(t);
-  }
-
-  /**
-   * Converts org.apache.hadoop.ipc.RemoteException into original exception,
-   * if possible. If the original exception is an Error or a RuntimeException,
-   * throws the original exception.
-   *
-   * @param re original exception
-   * @return decoded RemoteException if it is an instance of or a subclass of
-   * IOException, or the original RemoteException if it cannot be decoded.
-   *
-   * @throws IOException indicating a server error ocurred if the decoded
-   * exception is not an IOException. The decoded exception is set as
-   * the cause.
-   * @deprecated Use {@link RemoteException#unwrapRemoteException()} instead.
-   * In fact we should look into deprecating this whole class - St.Ack 2010929
-   */
-  public static IOException decodeRemoteException(final RemoteException re)
-  throws IOException {
-    IOException i = re;
-    try {
-      Class<?> c = Class.forName(re.getClassName());
-      Class<?>[] parameterTypes = { String.class };
-      Constructor<?> ctor = c.getConstructor(parameterTypes);
-      Object[] arguments = { re.getMessage() };
-      Throwable t = (Throwable) ctor.newInstance(arguments);
-      if (t instanceof IOException) {
-        i = (IOException) t;
-      } else {
-        i = new IOException("server error");
-        i.initCause(t);
-        throw i;
-      }
-    } catch (ClassNotFoundException x) {
-      // continue
-    } catch (NoSuchMethodException x) {
-      // continue
-    } catch (IllegalAccessException x) {
-      // continue
-    } catch (InvocationTargetException x) {
-      // continue
-    } catch (InstantiationException x) {
-      // continue
-    }
-    return i;
-  }
-}
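Every call site below follows the same before/after shape: the deleted helper's reflection-based decode is replaced by Hadoop's own RemoteException.unwrapRemoteException(), which likewise reinstantiates the wrapped exception class via its (String) constructor and falls back to returning the RemoteException itself when it cannot. A minimal, self-contained sketch of the conversion pattern — the doRpcCall() stub is hypothetical, standing in for any HBase RPC that can surface a RemoteException:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapExample {
  // Hypothetical RPC stub; stands in for any HBase call that can throw a RemoteException.
  static void doRpcCall() throws IOException {
    throw new RemoteException("java.io.FileNotFoundException", "no such region directory");
  }

  public static void main(String[] args) {
    try {
      doRpcCall();
    } catch (IOException e) {
      // Old pattern (removed by this commit):
      //   e = RemoteExceptionHandler.checkIOException(e);
      // New pattern, as applied throughout the files below:
      IOException unwrapped = e instanceof RemoteException
          ? ((RemoteException) e).unwrapRemoteException()
          : e;
      // Expected to print "java.io.FileNotFoundException: no such region directory".
      System.out.println(unwrapped.getClass().getName() + ": " + unwrapped.getMessage());
    }
  }
}

One behavioral nuance: the deleted checkIOException wrapped a non-IOException cause in a new IOException("server error"), whereas unwrapRemoteException returns the original RemoteException (itself an IOException) when it cannot reconstruct the wrapped type — benign for the logging-oriented call sites in this commit.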

ScannerCallable.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownScannerException;
@@ -236,7 +235,7 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
       }
       IOException ioe = e;
       if (e instanceof RemoteException) {
-        ioe = RemoteExceptionHandler.decodeRemoteException((RemoteException)e);
+        ioe = ((RemoteException) e).unwrapRemoteException();
       }
       if (logScannerActivity && (ioe instanceof UnknownScannerException)) {
         try {

MasterFileSystem.java

@@ -36,15 +36,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -56,6 +55,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -507,7 +507,8 @@ public class MasterFileSystem {
       setInfoFamilyCachingForMeta(true);
       HRegion.closeHRegion(meta);
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
+      e = e instanceof RemoteException ?
+          ((RemoteException)e).unwrapRemoteException() : e;
       LOG.error("bootstrap", e);
       throw e;
     }

CleanerChore.java

@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
@@ -123,7 +123,8 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Chore
       FileStatus[] files = FSUtils.listStatus(this.fs, this.oldFileDir);
       checkAndDeleteEntries(files);
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
+      e = e instanceof RemoteException ?
+          ((RemoteException)e).unwrapRemoteException() : e;
       LOG.warn("Error while cleaning the logs", e);
     }
   }
@@ -182,8 +183,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Chore
       // if the directory still has children, we can't delete it, so we are done
       if (!allChildrenDeleted) return false;
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
-      LOG.warn("Error while listing directory: " + dir, e);
+      e = e instanceof RemoteException ?
+          ((RemoteException)e).unwrapRemoteException() : e;
+      LOG.warn("Error while listing directory: " + dir, e);
       // couldn't list directory, so don't try to delete, and don't return success
       return false;
     }
@@ -261,7 +263,8 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Chore
           + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
       }
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
+      e = e instanceof RemoteException ?
+          ((RemoteException)e).unwrapRemoteException() : e;
       LOG.warn("Error while deleting: " + filePath, e);
     }
   }

CompactSplitThread.java

@@ -37,11 +37,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
@@ -486,7 +486,8 @@ public class CompactSplitThread implements CompactionRequestor {
         }
       }
     } catch (IOException ex) {
-      IOException remoteEx = RemoteExceptionHandler.checkIOException(ex);
+      IOException remoteEx =
+          ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
       LOG.error("Compaction failed " + this, remoteEx);
       if (remoteEx != ex) {
         LOG.info("Compaction failed at original callstack: " + formatStackTrace(ex));

HRegionServer.java

@@ -64,15 +64,14 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HealthCheckChore;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -1112,7 +1111,8 @@ public class HRegionServer extends HasThread implements
       try {
         this.hlogForMeta.close();
       } catch (Throwable e) {
-        LOG.error("Metalog close and delete failed", RemoteExceptionHandler.checkThrowable(e));
+        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
+        LOG.error("Metalog close and delete failed", e);
       }
     }
     if (this.hlog != null) {
@@ -1123,7 +1123,8 @@ public class HRegionServer extends HasThread implements
           hlog.close();
         }
       } catch (Throwable e) {
-        LOG.error("Close and delete failed", RemoteExceptionHandler.checkThrowable(e));
+        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
+        LOG.error("Close and delete failed", e);
       }
     }
   }
@@ -2623,10 +2624,11 @@ public class HRegionServer extends HasThread implements
       LOG.debug("NotServingRegionException; " + t.getMessage());
       return t;
     }
+    Throwable e = t instanceof RemoteException ? ((RemoteException) t).unwrapRemoteException() : t;
     if (msg == null) {
-      LOG.error("", RemoteExceptionHandler.checkThrowable(t));
+      LOG.error("", e);
     } else {
-      LOG.error(msg, RemoteExceptionHandler.checkThrowable(t));
+      LOG.error(msg, e);
     }
     if (!rpcServices.checkOOME(t)) {
       checkFileSystem();

HStore.java

@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -87,6 +86,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1615,7 +1615,8 @@ public class HStore implements Store {
         this.fs.removeStoreFiles(this.getColumnFamilyName(), compactedFiles);
       }
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
+      e = e instanceof RemoteException ?
+          ((RemoteException)e).unwrapRemoteException() : e;
       LOG.error("Failed removing compacted files in " + this +
         ". Files we were trying to remove are " + compactedFiles.toString() +
         "; some of them may have been already removed", e);

LogRoller.java

@@ -18,22 +18,26 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HasThread;
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReentrantLock;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * Runs periodically to determine if the HLog should be rolled.
@@ -105,7 +109,7 @@ class LogRoller extends HasThread implements WALActionsListener {
       } catch (IOException ex) {
         // Abort if we get here. We probably won't recover an IOE. HBASE-1132
         server.abort("IOE in log roller",
-          RemoteExceptionHandler.checkIOException(ex));
+          ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex);
       } catch (Exception ex) {
         LOG.error("Log rolling failed", ex);
         server.abort("Log rolling failed", ex);

MemStoreFlusher.java

@@ -43,15 +43,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 import org.htrace.Trace;
 import org.htrace.TraceScope;
-import org.apache.hadoop.hbase.util.Counter;
 
 import com.google.common.base.Preconditions;
@@ -434,9 +434,11 @@ class MemStoreFlusher implements FlushRequester {
           this.server.compactSplitThread.requestSystemCompaction(
             region, Thread.currentThread().getName());
         } catch (IOException e) {
-          LOG.error(
+          e = e instanceof RemoteException ?
+              ((RemoteException)e).unwrapRemoteException() : e;
+          LOG.error(
             "Cache flush failed for region " + Bytes.toStringBinary(region.getRegionName()),
-            RemoteExceptionHandler.checkIOException(e));
+            e);
         }
       }
     }
@@ -494,9 +496,11 @@ class MemStoreFlusher implements FlushRequester {
       server.abort("Replay of HLog required. Forcing server shutdown", ex);
       return false;
     } catch (IOException ex) {
-      LOG.error("Cache flush failed" +
-        (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionName())) : ""),
-        RemoteExceptionHandler.checkIOException(ex));
+      ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
+      LOG.error(
+        "Cache flush failed"
+            + (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionName()))
+                : ""), ex);
       if (!server.checkFileSystem()) {
         return false;
       }

RegionMergeRequest.java

@@ -23,9 +23,9 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
@@ -118,8 +118,8 @@ class RegionMergeRequest implements Runnable {
         + ". Region merge took "
         + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime));
     } catch (IOException ex) {
-      LOG.error("Merge failed " + this,
-        RemoteExceptionHandler.checkIOException(ex));
+      ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
+      LOG.error("Merge failed " + this, ex);
       server.checkFileSystem();
     } finally {
       releaseTableLock();

SplitRequest.java

@@ -23,9 +23,9 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.base.Preconditions;
@@ -112,7 +112,8 @@ class SplitRequest implements Runnable {
         + st.getSecondDaughter().getRegionNameAsString() + ". Split took "
         + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
     } catch (IOException ex) {
-      LOG.error("Split failed " + this, RemoteExceptionHandler.checkIOException(ex));
+      ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
+      LOG.error("Split failed " + this, ex);
       server.checkFileSystem();
     } finally {
       if (this.parent.getCoprocessorHost() != null) {
@@ -120,7 +121,7 @@ class SplitRequest implements Runnable {
           this.parent.getCoprocessorHost().postCompleteSplit();
         } catch (IOException io) {
           LOG.error("Split failed " + this,
-            RemoteExceptionHandler.checkIOException(io));
+            io instanceof RemoteException ? ((RemoteException) io).unwrapRemoteException() : io);
         }
       }
       releaseTableLock();

HLogSplitter.java

@@ -56,15 +56,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -112,6 +111,7 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -361,7 +361,7 @@ public class HLogSplitter {
       ZKSplitLog.markCorrupted(rootDir, logfile.getPath().getName(), fs);
       isCorrupted = true;
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
+      e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
       throw e;
     } finally {
       LOG.debug("Finishing writing output logs and closing down.");
@@ -1276,7 +1276,8 @@ public class HLogSplitter {
         wap.incrementEdits(editsCount);
         wap.incrementNanoTime(System.nanoTime() - startTime);
       } catch (IOException e) {
-        e = RemoteExceptionHandler.checkIOException(e);
+        e = e instanceof RemoteException ?
+            ((RemoteException)e).unwrapRemoteException() : e;
        LOG.fatal(" Got while writing log entry to log", e);
        throw e;
      }
@@ -1625,7 +1626,7 @@ public class HLogSplitter {
       rsw.incrementEdits(actions.size());
       rsw.incrementNanoTime(System.nanoTime() - startTime);
     } catch (IOException e) {
-      e = RemoteExceptionHandler.checkIOException(e);
+      e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
       LOG.fatal(" Got while writing log entry to log", e);
       throw e;
     }

FSUtils.java

@@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -73,6 +72,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
@@ -399,7 +399,8 @@ public abstract class FSUtils {
         return;
       }
     } catch (IOException e) {
-      exception = RemoteExceptionHandler.checkIOException(e);
+      exception = e instanceof RemoteException ?
+          ((RemoteException)e).unwrapRemoteException() : e;
     }
     try {
       fs.close();

HMerge.java

@@ -30,13 +30,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnectable;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * A non-instantiable class that has a static method capable of compacting
@@ -263,7 +263,7 @@ class HMerge {
         }
         return region;
       } catch (IOException e) {
-        e = RemoteExceptionHandler.checkIOException(e);
+        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
         LOG.error("meta scanner error", e);
         metaScanner.close();
         throw e;