HBASE-19919 Tidying up logging
parent 897afb7d18
commit bac4687345
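What follows is the patch body. Nearly every hunk applies the same tidy-up: log messages are shortened or sentence-cased, noisy INFO lines are demoted to DEBUG or TRACE, and guarded string-concatenation logging is replaced with SLF4J parameterized logging. A minimal sketch of that idiom, modeled on the NettyRpcConnection hunk below (the class LoggingIdiomSketch and its methods are illustrative only, not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only; the class and method names are made up.
public class LoggingIdiomSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingIdiomSketch.class);

  // Old style removed by the patch: explicit guard plus string concatenation.
  void connectOldStyle(Object address) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to " + address);
    }
  }

  // New style used by the patch: parameterized message. SLF4J renders the argument
  // into the message only when DEBUG is enabled, so the guard and the concatenation
  // can be dropped for simple cases.
  void connectNewStyle(Object address) {
    LOG.debug("Connecting to {}", address);
  }
}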
@@ -251,9 +251,7 @@ class NettyRpcConnection extends RpcConnection {
   }
 
   private void connect() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to " + remoteId.address);
-    }
+    LOG.debug("Connecting to {}", remoteId.address);
 
     this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
         .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
@@ -131,8 +131,8 @@ public final class ReadOnlyZKClient implements Closeable {
         conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS);
     this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS);
     LOG.info(
-      "Start read only zookeeper connection {} to {}, " + "session timeout {} ms, retries {}, " +
-        "retry interval {} ms, keep alive {} ms",
+      "Connect {} to {} with session timeout={}ms, retries {}, " +
+        "retry interval {}ms, keepAlive={}ms",
       getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs);
     Threads.setDaemonThreadRunning(new Thread(this::run),
       "ReadOnlyZKClient-" + connectString + "@" + getId());
@@ -313,9 +313,7 @@ public final class ReadOnlyZKClient implements Closeable {
       }
       if (task == null) {
         if (pendingRequests == 0) {
-          LOG.debug(
-            "{} to {} no activities for {} ms, close active connection. " +
-              "Will reconnect next time when there are new requests",
+          LOG.debug("{} to {} inactive for {}ms; closing (Will reconnect when new requests)",
             getId(), connectString, keepAliveTimeMs);
           closeZk();
         }
@@ -247,7 +247,7 @@ public class ChoreService implements ChoreServicer {
    */
  static class ChoreServiceThreadFactory implements ThreadFactory {
    private final String threadPrefix;
-    private final static String THREAD_NAME_SUFFIX = "_Chore_";
+    private final static String THREAD_NAME_SUFFIX = ".Chore.";
    private AtomicInteger threadNumber = new AtomicInteger(1);
 
    /**
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.net;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
@@ -58,6 +59,24 @@ public class Address implements Comparable<Address> {
     return this.hostAndPort.toString();
   }
 
+  /**
+   * If hostname is a.b.c and the port is 123, return a:123 instead of a.b.c:123.
+   * @return if host looks like it is resolved -- not an IP -- then strip the domain portion
+   *         otherwise returns same as {@link #toString()}}
+   */
+  public String toStringWithoutDomain() {
+    String hostname = getHostname();
+    String [] parts = hostname.split("\\.");
+    if (parts.length > 1) {
+      for (String part: parts) {
+        if (!StringUtils.isNumeric(part)) {
+          return Address.fromParts(parts[0], getPort()).toString();
+        }
+      }
+    }
+    return toString();
+  }
+
   @Override
   // Don't use HostAndPort equals... It is wonky including
   // ipv6 brackets
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.net;
+
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestAddress {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestAddress.class);
+
+  @Test
+  public void testGetHostWithoutDomain() {
+    assertEquals("a:123",
+        Address.fromParts("a.b.c", 123).toStringWithoutDomain());
+    assertEquals("1:123",
+        Address.fromParts("1.b.c", 123).toStringWithoutDomain());
+    assertEquals("123.456.789.1:123",
+        Address.fromParts("123.456.789.1", 123).toStringWithoutDomain());
+    assertEquals("[2001:db8::1]:80",
+        Address.fromParts("[2001:db8::1]:", 80).toStringWithoutDomain());
+    assertEquals("[2001:db8::1]:80",
+        Address.fromParts("[2001:db8::1]:", 80).toStringWithoutDomain());
+  }
+}
@@ -230,9 +230,7 @@ public class ProcedureExecutor<TEnvironment> {
           nonceKeysToProcIdsMap.remove(nonceKey);
         }
         it.remove();
-        if (debugEnabled) {
-          LOG.debug("Evict completed " + proc);
-        }
+        LOG.trace("Evict completed {}", proc);
       }
     }
     if (batchCount > 0) {
@@ -316,7 +314,6 @@ public class ProcedureExecutor<TEnvironment> {
       @Override
       public void setMaxProcId(long maxProcId) {
         assert lastProcId.get() < 0 : "expected only one call to setMaxProcId()";
-        LOG.debug("Load max pid=" + maxProcId);
         lastProcId.set(maxProcId);
       }
 
@@ -512,10 +509,10 @@ public class ProcedureExecutor<TEnvironment> {
     // We have numThreads executor + one timer thread used for timing out
     // procedures and triggering periodic procedures.
     this.corePoolSize = numThreads;
-    LOG.info("Starting ProcedureExecutor Worker threads (ProcedureExecutorWorker)=" + corePoolSize);
+    LOG.info("Starting {} Workers (bigger of cpus/4 or 16)", corePoolSize);
 
     // Create the Thread Group for the executors
-    threadGroup = new ThreadGroup("ProcedureExecutorWorkerGroup");
+    threadGroup = new ThreadGroup("PEWorkerGroup");
 
     // Create the timeout executor
     timeoutExecutor = new TimeoutExecutorThread(threadGroup);
@@ -533,7 +530,7 @@ public class ProcedureExecutor<TEnvironment> {
     st = EnvironmentEdgeManager.currentTime();
     store.recoverLease();
     et = EnvironmentEdgeManager.currentTime();
-    LOG.info(String.format("Recover store (%s) lease: %s",
+    LOG.info(String.format("Recovered %s lease in %s",
       store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // start the procedure scheduler
@@ -547,7 +544,7 @@ public class ProcedureExecutor<TEnvironment> {
     st = EnvironmentEdgeManager.currentTime();
     load(abortOnCorruption);
     et = EnvironmentEdgeManager.currentTime();
-    LOG.info(String.format("Load store (%s): %s",
+    LOG.info(String.format("Loaded %s in %s, um pid=",
      store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // Start the executors. Here we must have the lastProcId set.
@@ -1612,7 +1609,6 @@ public class ProcedureExecutor<TEnvironment> {
     }
 
     // If this procedure is the last child awake the parent procedure
-    LOG.info("Finish suprocedure " + procedure);
     if (parent.tryRunnable()) {
       // If we succeeded in making the parent runnable -- i.e. all of its
       // children have completed, move parent to front of the queue.
@@ -1711,7 +1707,7 @@ public class ProcedureExecutor<TEnvironment> {
     private Procedure activeProcedure;
 
     public WorkerThread(final ThreadGroup group) {
-      super(group, "ProcedureExecutorWorker-" + workerId.incrementAndGet());
+      super(group, "PEWorker-" + workerId.incrementAndGet());
       setDaemon(true);
     }
 
@@ -94,7 +94,7 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable
       return false;
     }
 
-    LOG.info("Starting procedure remote dispatcher; threads=" + this.corePoolSize +
+    LOG.info("Started, threads=" + this.corePoolSize +
       ", queueMaxSize=" + this.queueMaxSize + ", operationDelay=" + this.operationDelay);
 
     // Create the timeout executor
@@ -176,9 +176,7 @@ public abstract class StateMachineProcedure<TEnvironment, TState>
         this.cycles++;
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(toString());
-      }
+      LOG.trace("{}", toString());
       stateFlow = executeFromState(env, state);
       if (!hasMoreState()) setNextState(EOF_STATE);
       if (subProcList != null && !subProcList.isEmpty()) {
@@ -358,7 +358,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
   public void recoverLease() throws IOException {
     lock.lock();
     try {
-      LOG.info("Starting WAL Procedure Store lease recovery");
+      LOG.trace("Starting WAL Procedure Store lease recovery");
       FileStatus[] oldLogs = getLogFiles();
       while (isRunning()) {
         // Get Log-MaxID and recover lease on old logs
@@ -387,7 +387,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
           continue;
         }
 
-        LOG.info("Lease acquired for flushLogId: " + flushLogId);
+        LOG.trace("Lease acquired for flushLogId={}", flushLogId);
         break;
       }
     } finally {
@@ -405,9 +405,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
 
       // Nothing to do, If we have only the current log.
       if (logs.size() == 1) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("No state logs to replay.");
-        }
+        LOG.trace("No state logs to replay.");
         loader.setMaxProcId(0);
         return;
       }
@@ -451,7 +451,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
     ReplicationPeerZKImpl previous =
       ((ConcurrentMap<String, ReplicationPeerZKImpl>) peerClusters).putIfAbsent(peerId, peer);
     if (previous == null) {
-      LOG.info("Added new peer cluster=" + peer.getPeerConfig().getClusterKey());
+      LOG.info("Added peer cluster=" + peer.getPeerConfig().getClusterKey());
     } else {
       LOG.info("Peer already present, " + previous.getPeerConfig().getClusterKey() +
         ", new cluster=" + peer.getPeerConfig().getClusterKey());
@@ -151,7 +151,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
     }
     Task task = findOrCreateOrphanTask(path);
     if (task.isOrphan() && (task.incarnation.get() == 0)) {
-      LOG.info("resubmitting unassigned orphan task " + path);
+      LOG.info("Resubmitting unassigned orphan task " + path);
       // ignore failure to resubmit. The timeout-monitor will handle it later
       // albeit in a more crude fashion
       resubmitTask(path, task, FORCE);
@@ -202,7 +202,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
       SplitLogCounters.tot_mgr_resubmit_force.increment();
       version = -1;
     }
-    LOG.info("resubmitting task " + path);
+    LOG.info("Resubmitting task " + path);
     task.incarnation.incrementAndGet();
     boolean result = resubmit(path, version);
     if (!result) {
@@ -280,7 +280,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
         SplitLogCounters.tot_mgr_rescan_deleted.increment();
       }
       SplitLogCounters.tot_mgr_missing_state_in_delete.increment();
-      LOG.debug("deleted task without in memory state " + path);
+      LOG.debug("Deleted task without in memory state " + path);
       return;
     }
     synchronized (task) {
@@ -328,13 +328,13 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
   }
 
   private void createNodeSuccess(String path) {
-    LOG.debug("put up splitlog task at znode " + path);
+    LOG.debug("Put up splitlog task at znode " + path);
     getDataSetWatch(path, zkretries);
   }
 
   private void createNodeFailure(String path) {
     // TODO the Manager should split the log locally instead of giving up
-    LOG.warn("failed to create task node" + path);
+    LOG.warn("Failed to create task node " + path);
     setDone(path, FAILURE);
   }
 
@@ -360,15 +360,15 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
       data = ZKMetadata.removeMetaData(data);
       SplitLogTask slt = SplitLogTask.parseFrom(data);
       if (slt.isUnassigned()) {
-        LOG.debug("task not yet acquired " + path + " ver = " + version);
+        LOG.debug("Task not yet acquired " + path + ", ver=" + version);
         handleUnassignedTask(path);
       } else if (slt.isOwned()) {
         heartbeat(path, version, slt.getServerName());
       } else if (slt.isResigned()) {
-        LOG.info("task " + path + " entered state: " + slt.toString());
+        LOG.info("Task " + path + " entered state=" + slt.toString());
         resubmitOrFail(path, FORCE);
       } else if (slt.isDone()) {
-        LOG.info("task " + path + " entered state: " + slt.toString());
+        LOG.info("Task " + path + " entered state=" + slt.toString());
         if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) {
           if (taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == Status.DONE) {
             setDone(path, SUCCESS);
@@ -379,7 +379,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
           setDone(path, SUCCESS);
         }
       } else if (slt.isErr()) {
-        LOG.info("task " + path + " entered state: " + slt.toString());
+        LOG.info("Task " + path + " entered state=" + slt.toString());
         resubmitOrFail(path, CHECK);
       } else {
         LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = "
@@ -395,7 +395,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
   }
 
   private void getDataSetWatchFailure(String path) {
-    LOG.warn("failed to set data watch " + path);
+    LOG.warn("Failed to set data watch " + path);
     setDone(path, FAILURE);
   }
 
@@ -404,7 +404,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
     if (task == null) {
       if (!ZKSplitLog.isRescanNode(watcher, path)) {
         SplitLogCounters.tot_mgr_unacquired_orphan_done.increment();
-        LOG.debug("unacquired orphan task is done " + path);
+        LOG.debug("Unacquired orphan task is done " + path);
       }
     } else {
       synchronized (task) {
@@ -441,7 +441,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
 
   private Task findOrCreateOrphanTask(String path) {
     return computeIfAbsent(details.getTasks(), path, Task::new, () -> {
-      LOG.info("creating orphan task " + path);
+      LOG.info("Creating orphan task " + path);
       SplitLogCounters.tot_mgr_orphan_task_acquired.increment();
     });
   }
@@ -450,7 +450,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
     Task task = findOrCreateOrphanTask(path);
     if (new_version != task.last_version) {
       if (task.isUnassigned()) {
-        LOG.info("task " + path + " acquired by " + workerName);
+        LOG.info("Task " + path + " acquired by " + workerName);
       }
       task.heartbeat(EnvironmentEdgeManager.currentTime(), new_version, workerName);
       SplitLogCounters.tot_mgr_heartbeat.increment();
@@ -468,11 +468,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
     try {
       orphans = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.znodePaths.splitLogZNode);
       if (orphans == null) {
-        LOG.warn("could not get children of " + this.watcher.znodePaths.splitLogZNode);
+        LOG.warn("Could not get children of " + this.watcher.znodePaths.splitLogZNode);
         return;
       }
     } catch (KeeperException e) {
-      LOG.warn("could not get children of " + this.watcher.znodePaths.splitLogZNode + " "
+      LOG.warn("Could not get children of " + this.watcher.znodePaths.splitLogZNode + " "
        + StringUtils.stringifyException(e));
      return;
    }
@@ -483,9 +483,9 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
      String nodepath = ZNodePaths.joinZNode(watcher.znodePaths.splitLogZNode, path);
      if (ZKSplitLog.isRescanNode(watcher, nodepath)) {
        rescan_nodes++;
-        LOG.debug("found orphan rescan node " + path);
+        LOG.debug("Found orphan rescan node " + path);
      } else {
-        LOG.info("found orphan task " + path);
+        LOG.info("Found orphan task " + path);
      }
      getDataSetWatch(nodepath, zkretries);
    }
@@ -511,11 +511,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
      SplitLogTask slt =
          new SplitLogTask.Unassigned(this.details.getServerName());
      if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) {
-        LOG.debug("failed to resubmit task " + path + " version changed");
+        LOG.debug("Failed to resubmit task " + path + " version changed");
        return false;
      }
    } catch (NoNodeException e) {
-      LOG.warn("failed to resubmit because znode doesn't exist " + path
+      LOG.warn("Failed to resubmit because znode doesn't exist " + path
        + " task done (or forced done by removing the znode)");
      try {
        getDataSetWatchSuccess(path, null, Integer.MIN_VALUE);
@@ -525,11 +525,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
      }
      return false;
    } catch (KeeperException.BadVersionException e) {
-      LOG.debug("failed to resubmit task " + path + " version changed");
+      LOG.debug("Failed to resubmit task " + path + " version changed");
      return false;
    } catch (KeeperException e) {
      SplitLogCounters.tot_mgr_resubmit_failed.increment();
-      LOG.warn("failed to resubmit " + path, e);
+      LOG.warn("Failed to resubmit " + path, e);
      return false;
    }
    return true;
@@ -590,11 +590,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
      // this manager are get-znode-data, task-finisher and delete-znode.
      // And all code pieces correctly handle the case of suddenly
      // disappearing task-znode.
-      LOG.debug("found pre-existing znode " + path);
+      LOG.debug("Found pre-existing znode " + path);
      SplitLogCounters.tot_mgr_node_already_exists.increment();
    } else {
      Long retry_count = (Long) ctx;
-      LOG.warn("create rc =" + KeeperException.Code.get(rc) + " for " + path
+      LOG.warn("Create rc=" + KeeperException.Code.get(rc) + " for " + path
        + " remaining retries=" + retry_count);
      if (retry_count == 0) {
        SplitLogCounters.tot_mgr_node_create_err.increment();
@@ -625,7 +625,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
    }
    if (rc == KeeperException.Code.NONODE.intValue()) {
      SplitLogCounters.tot_mgr_get_data_nonode.increment();
-      LOG.warn("task znode " + path + " vanished or not created yet.");
+      LOG.warn("Task znode " + path + " vanished or not created yet.");
      // ignore since we should not end up in a case where there is in-memory task,
      // but no znode. The only case is between the time task is created in-memory
      // and the znode is created. See HBASE-11217.
@@ -634,11 +634,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
    Long retry_count = (Long) ctx;
 
    if (retry_count < 0) {
-      LOG.warn("getdata rc = " + KeeperException.Code.get(rc) + " " + path
+      LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path
        + ". Ignoring error. No error handling. No retrying.");
      return;
    }
-    LOG.warn("getdata rc = " + KeeperException.Code.get(rc) + " " + path
+    LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path
      + " remaining retries=" + retry_count);
    if (retry_count == 0) {
      SplitLogCounters.tot_mgr_get_data_err.increment();
@@ -675,10 +675,10 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
    if (rc != KeeperException.Code.NONODE.intValue()) {
      SplitLogCounters.tot_mgr_node_delete_err.increment();
      Long retry_count = (Long) ctx;
-      LOG.warn("delete rc=" + KeeperException.Code.get(rc) + " for " + path
+      LOG.warn("Delete rc=" + KeeperException.Code.get(rc) + " for " + path
        + " remaining retries=" + retry_count);
      if (retry_count == 0) {
-        LOG.warn("delete failed " + path);
+        LOG.warn("Delete failed " + path);
        details.getFailedDeletions().add(path);
        deleteNodeFailure(path);
      } else {
@@ -691,7 +691,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
        + " in earlier retry rounds. zkretries = " + ctx);
      }
    } else {
-      LOG.debug("deleted " + path);
+      LOG.debug("Deleted " + path);
    }
    deleteNodeSuccess(path);
  }
@@ -157,8 +157,7 @@ public abstract class CoprocessorHost<C extends Coprocessor, E extends Coprocess
        E env = checkAndLoadInstance(implClass, priority, conf);
        if (env != null) {
          this.coprocEnvironments.add(env);
-          LOG.info(
-              "System coprocessor " + className + " was loaded " + "successfully with priority (" + priority + ").");
+          LOG.info("System coprocessor {} loaded, priority={}.", className, priority);
          ++priority;
        }
      } catch (Throwable t) {
@@ -251,8 +251,6 @@ public abstract class RpcExecutor {
    double handlerFailureThreshhold = conf == null ? 1.0 : conf.getDouble(
      HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT,
      HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT);
-    LOG.debug("Started " + handlers.size() + " " + threadPrefix +
-      " handlers, qsize=" + qsize + " on port=" + port);
    for (int i = 0; i < numHandlers; i++) {
      final int index = qindex + (i % qsize);
      String name = "RpcServer." + threadPrefix + ".handler=" + handlers.size() + ",queue=" + index
@@ -262,6 +260,8 @@ public abstract class RpcExecutor {
      handler.start();
      handlers.add(handler);
    }
+    LOG.debug("Started " + handlers.size() + " " + threadPrefix +
+      " handlers, queues=" + qsize + ", port=" + port);
  }
 
  /**
@@ -125,7 +125,7 @@ public class ActiveMasterManager extends ZKListener {
    synchronized(clusterHasActiveMaster) {
      if (ZKUtil.watchAndCheckExists(watcher, watcher.znodePaths.masterAddressZNode)) {
        // A master node exists, there is an active master
-        LOG.debug("A master is now available");
+        LOG.trace("A master is now available");
        clusterHasActiveMaster.set(true);
      } else {
        // Node is no longer there, cluster does not have an active master
@@ -178,7 +178,7 @@ public class ActiveMasterManager extends ZKListener {
          // We are the master, return
          startupStatus.setStatus("Successfully registered as active master.");
          this.clusterHasActiveMaster.set(true);
-          LOG.info("Registered Active Master=" + this.sn);
+          LOG.info("Registered as active master=" + this.sn);
          return true;
        }
 
@@ -745,7 +745,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    boolean wasUp = this.clusterStatusTracker.isClusterUp();
    if (!wasUp) this.clusterStatusTracker.setClusterUp();
 
-    LOG.info("Server active/primary master=" + this.serverName +
+    LOG.info("Active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");
@@ -1145,7 +1145,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    startProcedureExecutor();
 
    // Start log cleaner thread
-    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
+    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
    this.logCleaner =
      new LogCleaner(cleanerInterval,
        this, conf, getMasterWalManager().getFileSystem(),
@@ -1222,8 +1222,9 @@ public class HMaster extends HRegionServer implements MasterServices {
    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
    configurationManager.registerObserver(procEnv);
 
+    int cpus = Runtime.getRuntime().availableProcessors();
    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-        Math.max(Runtime.getRuntime().availableProcessors(),
+        Math.max((cpus > 0? cpus/4: 0),
          MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
    final boolean abortOnCorruption = conf.getBoolean(
      MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
@@ -3549,7 +3550,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    LOG.info("Running RecoverMetaProcedure to ensure proper hbase:meta deploy.");
    long procId = procedureExecutor.submitProcedure(new RecoverMetaProcedure(null, true, latch));
    latch.await();
-    LOG.info("hbase:meta (default replica) deployed at=" +
+    LOG.info("hbase:meta deployed at=" +
      getMetaTableLocator().getMetaRegionLocation(getZooKeeper()));
    return assignmentManager.isMetaInitialized();
  }
@@ -150,7 +150,7 @@ public class MasterCoprocessorHost
    // only one MasterCoprocessorHost instance in the master process
    boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
      DEFAULT_COPROCESSORS_ENABLED);
-    LOG.info("System coprocessor loading is " + (coprocessorsEnabled ? "enabled" : "disabled"));
+    LOG.trace("System coprocessor loading is {}", (coprocessorsEnabled ? "enabled" : "disabled"));
    loadSystemCoprocessors(conf, MASTER_COPROCESSOR_CONF_KEY);
  }
 
@@ -424,7 +424,7 @@ public class ServerManager {
   */
  @VisibleForTesting
  void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
-    LOG.info("Registering server=" + serverName);
+    LOG.info("Registering regionserver=" + serverName);
    this.onlineServers.put(serverName, sl);
    this.rsAdmins.remove(serverName);
  }
@@ -521,7 +521,7 @@ public class ServerManager {
        }
        sb.append(key);
      }
-      LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
+      LOG.info("Waiting on regionserver(s) " + sb.toString());
      previousLogTime = System.currentTimeMillis();
    }
 
@@ -860,7 +860,7 @@ public class ServerManager {
      if (oldCount != count || lastLogTime + interval < now) {
        lastLogTime = now;
        String msg =
-          "Waiting on RegionServer count=" + count + " to settle; waited="+
+          "Waiting on regionserver count=" + count + "; waited="+
          slept + "ms, expecting min=" + minToStart + " server(s), max="+ getStrForMax(maxToStart) +
          " server(s), " + "timeout=" + timeout + "ms, lastChange=" + (lastCountChange - now) + "ms";
        LOG.info(msg);
@@ -883,7 +883,7 @@ public class ServerManager {
    if (isClusterShutdown()) {
      this.master.stop("Cluster shutdown");
    }
-    LOG.info("Finished wait on RegionServer count=" + count + "; waited=" + slept + "ms," +
+    LOG.info("Finished waiting on RegionServer count=" + count + "; waited=" + slept + "ms," +
      " expected min=" + minToStart + " server(s), max=" + getStrForMax(maxToStart) + " server(s),"+
      " master is "+ (this.master.isStopped() ? "stopped.": "running"));
  }
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -120,7 +121,8 @@ public class SplitLogManager {
      throws IOException {
    this.server = master;
    this.conf = conf;
-    this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_");
+    this.choreService =
+        new ChoreService(((HasThread)master).getName() + ".splitLogManager.");
    if (server.getCoordinatedStateManager() != null) {
      SplitLogManagerCoordination coordination = getSplitLogManagerCoordination();
      Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
@@ -222,10 +222,11 @@ public class TableStateManager {
    });
    for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
      String table = entry.getKey();
-      if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
+      if (table.equals(TableName.META_TABLE_NAME.getNameAsString())) {
        continue;
+      }
      if (!states.containsKey(table)) {
-        LOG.warn("Found table with no state, assuming ENABLED");
+        LOG.warn(table + " has no state, assuming ENABLED");
        MetaTableAccessor.updateTableState(connection, TableName.valueOf(table),
          TableState.State.ENABLED);
      }
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
@@ -216,7 +217,7 @@ public class AssignmentManager implements ServerListener {
      return;
    }
 
-    LOG.info("Starting assignment manager");
+    LOG.trace("Starting assignment manager");
 
    // Register Server Listener
    master.getServerManager().registerListener(this);
@@ -1182,8 +1183,7 @@ public class AssignmentManager implements ServerListener {
  // ============================================================================================
  public void joinCluster() throws IOException {
    final long startTime = System.currentTimeMillis();
-
-    LOG.info("Joining cluster...Loading hbase:meta content.");
+    LOG.debug("Joining cluster...");
 
    // Scan hbase:meta to build list of existing regions, servers, and assignment
    loadMeta();
@@ -1624,7 +1624,7 @@ public class AssignmentManager implements ServerListener {
  }
 
  private void startAssignmentThread() {
-    assignThread = new Thread("AssignmentThread") {
+    assignThread = new Thread(((HasThread)this.master).getName()) {
      @Override
      public void run() {
        while (isRunning()) {
@@ -63,7 +63,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
   * while latter will use only 1 thread for chore to scan dir.
   */
  public static final String CHORE_POOL_SIZE = "hbase.cleaner.scan.dir.concurrent.size";
-  private static final String DEFAULT_CHORE_POOL_SIZE = "0.5";
+  private static final String DEFAULT_CHORE_POOL_SIZE = "0.25";
 
  // It may be waste resources for each cleaner chore own its pool,
  // so let's make pool for all cleaner chores.
@@ -220,9 +220,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
  protected void chore() {
    if (getEnabled()) {
      if (runCleaner()) {
-        LOG.debug("Cleaned old files/dirs under {} successfully", oldFileDir);
+        LOG.debug("Cleaned all WALs under {}", oldFileDir);
      } else {
-        LOG.warn("Failed to fully clean old files/dirs under {}", oldFileDir);
+        LOG.warn("WALs outstanding under {}", oldFileDir);
      }
      // After each clean chore, checks if receives reconfigure notification while cleaning
      if (reconfig.compareAndSet(true, false)) {
@@ -382,8 +382,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
    return this.enabled.getAndSet(enabled);
  }
 
-  public boolean getEnabled() {
-    return this.enabled.get();
+  public boolean getEnabled() { return this.enabled.get();
  }
 
  private interface Action<T> {
@@ -405,11 +404,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
    @Override
    protected Boolean compute() {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("CleanerTask " + Thread.currentThread().getId() +
-          " starts cleaning dirs and files under " + dir + " and itself.");
-      }
-
+      LOG.debug("Cleaning under {}", dir);
      List<FileStatus> subDirs;
      List<FileStatus> files;
      try {
@@ -470,13 +465,11 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     */
    private boolean deleteAction(Action<Boolean> deletion, String type) {
      boolean deleted;
-      String errorMsg = null;
      try {
        LOG.trace("Start deleting {} under {}", type, dir);
        deleted = deletion.act();
      } catch (IOException ioe) {
-        errorMsg = ioe.getMessage();
-        LOG.warn("Could not delete {} under {}; {}", type, dir, errorMsg);
+        LOG.warn("Could not delete {} under {}; {}", type, dir, ioe);
        deleted = false;
      }
      LOG.trace("Finish deleting {} under {}, deleted=", type, dir, deleted);
@@ -204,7 +204,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
      large.setDaemon(true);
      large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis());
      large.start();
-      LOG.debug("Starting hfile cleaner for large files: {}", large);
+      LOG.debug("Starting for large file={}", large);
      threads.add(large);
    }
 
@@ -219,7 +219,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
      small.setDaemon(true);
      small.setName(n + "-HFileCleaner.small." + i + "-" + System.currentTimeMillis());
      small.start();
-      LOG.debug("Starting hfile cleaner for small files: {}", small);
+      LOG.debug("Starting for small files={}", small);
      threads.add(small);
    }
  }
@@ -119,7 +119,7 @@ public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate> {
  }
 
  private List<Thread> createOldWalsCleaner(int size) {
-    LOG.info("Creating OldWALs cleaners with size: " + size);
+    LOG.info("Creating OldWALs cleaners with size=" + size);
 
    List<Thread> oldWALsCleaner = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
@@ -105,9 +105,8 @@ public class RSProcedureDispatcher
      final Set<RemoteProcedure> remoteProcedures) {
    final int rsVersion = master.getAssignmentManager().getServerVersion(serverName);
    if (rsVersion >= RS_VERSION_WITH_EXEC_PROCS) {
-      LOG.info(String.format(
-        "Using procedure batch rpc execution for serverName=%s version=%s",
-        serverName, rsVersion));
+      LOG.trace("Using procedure batch rpc execution for serverName={} version={}",
+        serverName, rsVersion);
      submitTask(new ExecuteProceduresRemoteCall(serverName, remoteProcedures));
    } else {
      LOG.info(String.format(
@@ -247,7 +247,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs {
      return false;
    }
 
-    LOG.debug("Starting the controller for procedure member:" + coordName);
+    LOG.debug("Starting controller for procedure member=" + coordName);
    return true;
  }
 
@@ -269,8 +269,7 @@ public abstract class ZKProcedureUtil
  }
 
  public void clearChildZNodes() throws KeeperException {
-    LOG.info("Clearing all procedure znodes: " + acquiredZnode + " " + reachedZnode + " "
-        + abortZnode);
+    LOG.debug("Clearing all znodes {}, {}, {}", acquiredZnode, reachedZnode, abortZnode);
 
    // If the coordinator was shutdown mid-procedure, then we are going to lose
    // an procedure that was previously started by cleaning out all the previous state. Its much
@@ -137,8 +137,8 @@ public class ChunkCreator {
      // the pool has run out of maxCount
      if (chunk == null) {
        if (LOG.isTraceEnabled()) {
-          LOG.trace("The chunk pool is full. Reached maxCount= " + this.pool.getMaxCount()
-              + ". Creating chunk onheap.");
+          LOG.trace("Chunk pool full (maxCount={}); creating chunk offheap.",
+              this.pool.getMaxCount());
        }
      }
    }
@@ -163,8 +163,8 @@ public class ChunkCreator {
   */
  Chunk getJumboChunk(CompactingMemStore.IndexType chunkIndexType, int jumboSize) {
    if (jumboSize <= chunkSize) {
-      LOG.warn("Jumbo chunk size " + jumboSize + " must be more than regular chunk size "
-          + chunkSize + ". Converting to regular chunk.");
+      LOG.warn("Jumbo chunk size=" + jumboSize + " must be more than regular chunk size="
+          + chunkSize + "; converting to regular chunk.");
      return getChunk(chunkIndexType,chunkSize);
    }
    // the size of the allocation includes
@@ -407,9 +407,9 @@ public class ChunkCreator {
          MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + " must be between 0.0 and 1.0");
    }
    int initialCount = (int) (initialCountPercentage * maxCount);
-    LOG.info("Allocating MemStoreChunkPool with chunk size "
-        + StringUtils.byteDesc(getChunkSize()) + ", max count " + maxCount
-        + ", initial count " + initialCount);
+    LOG.info("Allocating MemStoreChunkPool with chunk size="
+        + StringUtils.byteDesc(getChunkSize()) + ", max count=" + maxCount
+        + ", initial count=" + initialCount);
    return new MemStoreChunkPool(maxCount, initialCount, poolSizePercentage);
  }
 
@@ -624,14 +624,14 @@ public class HRegionServer extends HasThread implements
        masterAddressTracker = null;
        clusterStatusTracker = null;
      }
+      this.rpcServices.start();
      // This violates 'no starting stuff in Constructor' but Master depends on the below chore
      // and executor being created and takes a different startup route. Lots of overlap between HRS
      // and M (An M IS A HRS now). Need to refactor so less duplication between M and its super
      // Master expects Constructor to put up web servers. Ugh.
      // class HRS. TODO.
-      this.choreService = new ChoreService(getServerName().toString(), true);
-      this.executorService = new ExecutorService(getServerName().toShortString());
-      this.rpcServices.start();
+      this.choreService = new ChoreService(getName(), true);
+      this.executorService = new ExecutorService(getName());
      putUpWebUI();
    } catch (Throwable t) {
      // Make sure we log the exception. HRegionServer is often started via reflection and the
@ -100,6 +100,7 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
|
||||||
import org.apache.hadoop.hbase.ipc.ServerRpcController;
|
import org.apache.hadoop.hbase.ipc.ServerRpcController;
|
||||||
import org.apache.hadoop.hbase.log.HBaseMarkers;
|
import org.apache.hadoop.hbase.log.HBaseMarkers;
|
||||||
import org.apache.hadoop.hbase.master.MasterRpcServices;
|
import org.apache.hadoop.hbase.master.MasterRpcServices;
|
||||||
|
import org.apache.hadoop.hbase.net.Address;
|
||||||
import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
|
import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
|
||||||
import org.apache.hadoop.hbase.quotas.OperationQuota;
|
import org.apache.hadoop.hbase.quotas.OperationQuota;
|
||||||
import org.apache.hadoop.hbase.quotas.QuotaUtil;
|
import org.apache.hadoop.hbase.quotas.QuotaUtil;
|
||||||
|
@ -1173,7 +1174,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
||||||
throw new IllegalArgumentException("Failed resolve of " + initialIsa);
|
throw new IllegalArgumentException("Failed resolve of " + initialIsa);
|
||||||
}
|
}
|
||||||
priority = createPriority();
|
priority = createPriority();
|
||||||
String name = rs.getProcessName() + "/" + initialIsa.toString();
|
String hostname = initialIsa.getHostName();
|
||||||
|
// Using Address means we don't get the IP too. Shorten it more even to just the host name
|
||||||
|
// w/o the domain.
|
||||||
|
String name = rs.getProcessName() + "/" +
|
||||||
|
Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain();
|
||||||
// Set how many times to retry talking to another server over Connection.
|
// Set how many times to retry talking to another server over Connection.
|
||||||
ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
|
ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
|
||||||
rpcServer = createRpcServer(rs, rs.conf, rpcSchedulerFactory, bindAddress, name);
|
rpcServer = createRpcServer(rs, rs.conf, rpcSchedulerFactory, bindAddress, name);
|
||||||
|
|
|
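For the name shortening above, the diff's own comment explains the intent: go through Address so the IP is dropped, then trim the hostname to its unqualified form via toStringWithoutDomain(). A rough sketch of the effect, assuming the Address API behaves as used in the hunk; the host, port, and "regionserver" prefix are made-up stand-ins for initialIsa and rs.getProcessName():

import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.net.Address;

// Illustrative only; prints the old-style and new-style RPC server names side by side.
public class RpcServerNameSketch {
  public static void main(String[] args) {
    InetSocketAddress initialIsa = new InetSocketAddress("rs1.example.org", 16020);
    // Old name used the full InetSocketAddress, typically something like
    // "regionserver/rs1.example.org/10.0.0.1:16020".
    String oldName = "regionserver" + "/" + initialIsa.toString();
    // New name drops the IP and the domain, e.g. "regionserver/rs1:16020".
    String newName = "regionserver" + "/" +
        Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain();
    System.out.println(oldName);
    System.out.println(newName);
  }
}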
@ -83,10 +83,8 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate {
|
||||||
public boolean apply(FileStatus file) {
|
public boolean apply(FileStatus file) {
|
||||||
String wal = file.getPath().getName();
|
String wal = file.getPath().getName();
|
||||||
boolean logInReplicationQueue = wals.contains(wal);
|
boolean logInReplicationQueue = wals.contains(wal);
|
||||||
if (LOG.isDebugEnabled()) {
|
|
||||||
if (logInReplicationQueue) {
|
if (logInReplicationQueue) {
|
||||||
LOG.debug("Found log in ZK, keeping: " + wal);
|
LOG.debug("Found up in ZooKeeper, NOT deleting={}", wal);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return !logInReplicationQueue && (file.getModificationTime() < readZKTimestamp);
|
return !logInReplicationQueue && (file.getModificationTime() < readZKTimestamp);
|
||||||
}
|
}
|
||||||
|
|
|
@ -100,7 +100,7 @@ public class ReplicationPeerConfigUpgrader extends ReplicationStateZKBase {
|
||||||
// We only need to copy data from tableCFs node to rpc Node the first time hmaster start.
|
// We only need to copy data from tableCFs node to rpc Node the first time hmaster start.
|
||||||
if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().isEmpty()) {
|
if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().isEmpty()) {
|
||||||
// we copy TableCFs node into PeerNode
|
// we copy TableCFs node into PeerNode
|
||||||
LOG.info("copy tableCFs into peerNode:" + peerId);
|
LOG.info("Copy table ColumnFamilies into peer=" + peerId);
|
||||||
ReplicationProtos.TableCF[] tableCFs =
|
ReplicationProtos.TableCF[] tableCFs =
|
||||||
ReplicationPeerConfigUtil.parseTableCFs(
|
ReplicationPeerConfigUtil.parseTableCFs(
|
||||||
ZKUtil.getData(this.zookeeper, tableCFsNode));
|
ZKUtil.getData(this.zookeeper, tableCFsNode));
|
||||||
|
|
|
@ -756,7 +756,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
|
||||||
|
|
||||||
authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
|
authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
|
||||||
if (!authorizationEnabled) {
|
if (!authorizationEnabled) {
|
||||||
LOG.warn("The AccessController has been loaded with authorization checks disabled.");
|
LOG.warn("AccessController has been loaded with authorization checks DISABLED!");
|
||||||
}
|
}
|
||||||
|
|
||||||
shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
|
shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
|
||||||
|
|
|
@ -242,7 +242,7 @@ public class ZKPermissionWatcher extends ZKListener implements Closeable {
|
||||||
|
|
||||||
private void refreshAuthManager(String entry, byte[] nodeData) throws IOException {
|
private void refreshAuthManager(String entry, byte[] nodeData) throws IOException {
|
||||||
if (LOG.isDebugEnabled()) {
|
if (LOG.isDebugEnabled()) {
|
||||||
LOG.debug("Updating permissions cache from node "+entry+" with data: "+
|
LOG.debug("Updating permissions cache from {} with data {}", entry,
|
||||||
Bytes.toStringBinary(nodeData));
|
Bytes.toStringBinary(nodeData));
|
||||||
}
|
}
|
||||||
if(AccessControlLists.isNamespaceEntry(entry)) {
|
if(AccessControlLists.isNamespaceEntry(entry)) {
|
||||||
|
|
|
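Note that the hunk above keeps the isDebugEnabled() guard even after switching to {} placeholders, presumably because Bytes.toStringBinary(nodeData) is evaluated before SLF4J ever sees the arguments; the guard is what avoids that work when DEBUG is off. A small sketch of the pattern, with illustrative names:

import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative class; shows guarding a parameterized call whose argument is expensive to compute.
public class GuardedDebugSketch {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedDebugSketch.class);

  void refresh(String entry, byte[] nodeData) {
    // Placeholders defer message formatting, but the argument expression itself still
    // runs; keep the guard when that expression (here, toStringBinary) is costly.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Updating permissions cache from {} with data {}", entry,
          Bytes.toStringBinary(nodeData));
    }
  }
}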
@ -289,7 +289,7 @@ public class FSTableDescriptors implements TableDescriptors {
|
||||||
// add hbase:meta to the response
|
// add hbase:meta to the response
|
||||||
tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
|
tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
|
||||||
} else {
|
} else {
|
||||||
LOG.debug("Fetching table descriptors from the filesystem.");
|
LOG.trace("Fetching table descriptors from the filesystem.");
|
||||||
boolean allvisited = true;
|
boolean allvisited = true;
|
||||||
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
|
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
|
||||||
TableDescriptor htd = null;
|
TableDescriptor htd = null;
|
||||||
|
@ -659,9 +659,9 @@ public class FSTableDescriptors implements TableDescriptors {
|
||||||
if (sequenceId <= maxSequenceId) {
|
if (sequenceId <= maxSequenceId) {
|
||||||
boolean success = FSUtils.delete(fs, path, false);
|
boolean success = FSUtils.delete(fs, path, false);
|
||||||
if (success) {
|
if (success) {
|
||||||
LOG.debug("Deleted table descriptor at " + path);
|
LOG.debug("Deleted " + path);
|
||||||
} else {
|
} else {
|
||||||
LOG.error("Failed to delete descriptor at " + path);
|
LOG.error("Failed to delete table descriptor at " + path);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -713,7 +713,7 @@ public class FSTableDescriptors implements TableDescriptors {
|
||||||
if (!fs.rename(tempPath, tableInfoDirPath)) {
|
if (!fs.rename(tempPath, tableInfoDirPath)) {
|
||||||
throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
|
throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
|
||||||
}
|
}
|
||||||
LOG.debug("Wrote descriptor into: " + tableInfoDirPath);
|
LOG.debug("Wrote into " + tableInfoDirPath);
|
||||||
} catch (IOException ioe) {
|
} catch (IOException ioe) {
|
||||||
// Presume clash of names or something; go around again.
|
// Presume clash of names or something; go around again.
|
||||||
LOG.debug("Failed write and/or rename; retrying", ioe);
|
LOG.debug("Failed write and/or rename; retrying", ioe);
|
||||||
|
@ -784,11 +784,11 @@ public class FSTableDescriptors implements TableDescriptors {
|
||||||
}
|
}
|
||||||
FileStatus status = getTableInfoPath(fs, tableDir);
|
FileStatus status = getTableInfoPath(fs, tableDir);
|
||||||
if (status != null) {
|
if (status != null) {
|
||||||
LOG.debug("Current tableInfoPath = " + status.getPath());
|
LOG.debug("Current path=" + status.getPath());
|
||||||
if (!forceCreation) {
|
if (!forceCreation) {
|
||||||
if (fs.exists(status.getPath()) && status.getLen() > 0) {
|
if (fs.exists(status.getPath()) && status.getLen() > 0) {
|
||||||
if (readTableDescriptor(fs, status).equals(htd)) {
|
if (readTableDescriptor(fs, status).equals(htd)) {
|
||||||
LOG.debug("TableInfo already exists.. Skipping creation");
|
LOG.trace("TableInfo already exists.. Skipping creation");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -568,7 +568,7 @@ public class RecoverableZooKeeper {
|
||||||
Bytes.toStringBinary(data));
|
Bytes.toStringBinary(data));
|
||||||
throw e;
|
throw e;
|
||||||
}
|
}
|
||||||
LOG.debug("Node " + path + " already exists");
|
LOG.trace("Node {} already exists", path);
|
||||||
throw e;
|
throw e;
|
||||||
|
|
||||||
case CONNECTIONLOSS:
|
case CONNECTIONLOSS:
|
||||||
|
|