HBASE-930 RegionServer stuck: HLog: Could not append. Requesting close of log java.io.IOException: Could not get block locations. Aborting...
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@705064 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
5b2e15021c
commit
f69423a1d9
@ -27,6 +27,8 @@ Release 0.19.0 - Unreleased
|
|||||||
HBASE-928 NPE throwing RetriesExhaustedException
|
HBASE-928 NPE throwing RetriesExhaustedException
|
||||||
HBASE-924 Update hadoop in lib on 0.18 hbase branch to 0.18.1
|
HBASE-924 Update hadoop in lib on 0.18 hbase branch to 0.18.1
|
||||||
HBASE-929 Clarify that ttl in HColumnDescriptor is seconds
|
HBASE-929 Clarify that ttl in HColumnDescriptor is seconds
|
||||||
|
HBASE-930 RegionServer stuck: HLog: Could not append. Requesting close of
|
||||||
|
log java.io.IOException: Could not get block locations. Aborting...
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HBASE-901 Add a limit to key length, check key and value length on client side
|
HBASE-901 Add a limit to key length, check key and value length on client side
|
||||||
|
@ -0,0 +1,46 @@
|
|||||||
|
/**
|
||||||
|
* Copyright 2008 The Apache Software Foundation
|
||||||
|
*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hbase.regionserver;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
 * Thrown when we fail close of the write-ahead-log file.
 * Signals an unrecoverable condition: edits may be lost, so callers
 * (e.g. the log roller) are expected to abort the region server.
 * Package private. Only used inside this package.
 */
class FailedLogCloseException extends IOException {
  private static final long serialVersionUID = 1759152841462990925L;

  /** Constructs an exception with no detail message and no cause. */
  public FailedLogCloseException() {
    super();
  }

  /**
   * Constructs an exception with the given detail message.
   * @param message detail message (e.g. the log file identifier)
   */
  public FailedLogCloseException(String message) {
    super(message);
  }

  /**
   * Constructs an exception wrapping the underlying failure.
   * @param cause the underlying close failure
   */
  public FailedLogCloseException(Throwable cause) {
    super(cause);
  }

  /**
   * Constructs an exception with a detail message and underlying cause.
   * @param message detail message (e.g. the log file identifier)
   * @param cause the underlying close failure
   */
  public FailedLogCloseException(String message, Throwable cause) {
    super(message, cause);
  }
}
|
@ -178,7 +178,7 @@ class Flusher extends Thread implements FlushRequester {
|
|||||||
// is required. Currently the only way to do this is a restart of
|
// is required. Currently the only way to do this is a restart of
|
||||||
// the server. Abort because hdfs is probably bad (HBASE-644 is a case
|
// the server. Abort because hdfs is probably bad (HBASE-644 is a case
|
||||||
// where hdfs was bad but passed the hdfs check).
|
// where hdfs was bad but passed the hdfs check).
|
||||||
LOG.fatal("Replay of hlog required. Forcing server restart", ex);
|
LOG.fatal("Replay of hlog required. Forcing server shutdown", ex);
|
||||||
server.abort();
|
server.abort();
|
||||||
return false;
|
return false;
|
||||||
} catch (IOException ex) {
|
} catch (IOException ex) {
|
||||||
|
@ -226,9 +226,10 @@ public class HLog implements HConstants {
|
|||||||
* cacheFlushLock and then completeCacheFlush could be called which would wait
|
* cacheFlushLock and then completeCacheFlush could be called which would wait
|
||||||
* for the lock on this and consequently never release the cacheFlushLock
|
* for the lock on this and consequently never release the cacheFlushLock
|
||||||
*
|
*
|
||||||
|
* @throws FailedLogCloseException
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
public void rollWriter() throws IOException {
|
public void rollWriter() throws FailedLogCloseException, IOException {
|
||||||
this.cacheFlushLock.lock();
|
this.cacheFlushLock.lock();
|
||||||
try {
|
try {
|
||||||
if (closed) {
|
if (closed) {
|
||||||
@ -237,7 +238,14 @@ public class HLog implements HConstants {
|
|||||||
synchronized (updateLock) {
|
synchronized (updateLock) {
|
||||||
if (this.writer != null) {
|
if (this.writer != null) {
|
||||||
// Close the current writer, get a new one.
|
// Close the current writer, get a new one.
|
||||||
this.writer.close();
|
try {
|
||||||
|
this.writer.close();
|
||||||
|
} catch (IOException e) {
|
||||||
|
// Failed close of log file. Means we're losing edits. For now,
|
||||||
|
// shut ourselves down to minimize loss. Alternative is to try and
|
||||||
|
// keep going. See HBASE-930.
|
||||||
|
throw new FailedLogCloseException("#" + this.filenum, e);
|
||||||
|
}
|
||||||
Path p = computeFilename(old_filenum);
|
Path p = computeFilename(old_filenum);
|
||||||
if (LOG.isDebugEnabled()) {
|
if (LOG.isDebugEnabled()) {
|
||||||
LOG.debug("Closing current log writer " + FSUtils.getPath(p));
|
LOG.debug("Closing current log writer " + FSUtils.getPath(p));
|
||||||
|
@ -77,8 +77,11 @@ class LogRoller extends Thread implements LogRollListener {
|
|||||||
try {
|
try {
|
||||||
LOG.info("Rolling hlog. Number of entries: " + server.getLog().getNumEntries());
|
LOG.info("Rolling hlog. Number of entries: " + server.getLog().getNumEntries());
|
||||||
server.getLog().rollWriter();
|
server.getLog().rollWriter();
|
||||||
|
} catch (FailedLogCloseException e) {
|
||||||
|
LOG.fatal("Forcing server shutdown", e);
|
||||||
|
server.abort();
|
||||||
} catch (IOException ex) {
|
} catch (IOException ex) {
|
||||||
LOG.error("Log rolling failed",
|
LOG.error("Log rolling failed with ioe: ",
|
||||||
RemoteExceptionHandler.checkIOException(ex));
|
RemoteExceptionHandler.checkIOException(ex));
|
||||||
server.checkFileSystem();
|
server.checkFileSystem();
|
||||||
} catch (Exception ex) {
|
} catch (Exception ex) {
|
||||||
|
@ -72,10 +72,8 @@ public class FSUtils {
|
|||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
exception = RemoteExceptionHandler.checkIOException(e);
|
exception = RemoteExceptionHandler.checkIOException(e);
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
fs.close();
|
fs.close();
|
||||||
|
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
LOG.error("file system close failed: ", e);
|
LOG.error("file system close failed: ", e);
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user