Fix broken build
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@939871 13f79535-47bb-0310-9956-ffa450edef68
parent 895ccff4f3
commit 9ad652e9ba
@@ -249,4 +249,4 @@ public class ClusterStatus extends VersionedWritable {
       this.intransition.put(key, value);
     }
   }
 }
@@ -1,3 +1,22 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
@@ -24,7 +24,7 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 
@@ -74,12 +74,12 @@ public class JVMClusterUtil {
    * @throws IOException
    * @return Region server added.
    */
-  public static JVMClusterUtil.RegionServerThread createRegionServerThread(final HBaseConfiguration c,
+  public static JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration c,
     final Class<? extends HRegionServer> hrsc, final int index)
   throws IOException {
     HRegionServer server;
     try {
-      server = hrsc.getConstructor(HBaseConfiguration.class).newInstance(c);
+      server = hrsc.getConstructor(Configuration.class).newInstance(c);
     } catch (Exception e) {
       IOException ioe = new IOException();
       ioe.initCause(e);
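The two changes in the hunk above have to move together: Class.getConstructor matches declared parameter types exactly, so once the region server constructor takes a plain Configuration, looking it up with HBaseConfiguration.class would throw NoSuchMethodException even though HBaseConfiguration extends Configuration. A minimal sketch of the pattern, assuming a hypothetical MyServer class standing in for HRegionServer (only Configuration here is the real Hadoop class):

// Sketch only: MyServer and create() are illustrative, not HBase code.
import java.io.IOException;
import java.lang.reflect.Constructor;

import org.apache.hadoop.conf.Configuration;

public class ReflectiveConstructionSketch {

  // Hypothetical server class whose constructor now takes a Configuration.
  public static class MyServer {
    private final Configuration conf;
    public MyServer(final Configuration c) {
      this.conf = c;
    }
  }

  public static MyServer create(final Class<? extends MyServer> clazz,
      final Configuration c) throws IOException {
    try {
      // getConstructor() matches the declared parameter type exactly:
      // asking for getConstructor(HBaseConfiguration.class) fails with
      // NoSuchMethodException once the class declares Configuration,
      // which is why the lookup and the parameter type change in step.
      Constructor<? extends MyServer> ctor =
        clazz.getConstructor(Configuration.class);
      return ctor.newInstance(c);
    } catch (Exception e) {
      // Same wrapping style as the patched code: rethrow as IOException.
      IOException ioe = new IOException();
      ioe.initCause(e);
      throw ioe;
    }
  }

  public static void main(String[] args) throws IOException {
    MyServer server = create(MyServer.class, new Configuration());
    System.out.println("Constructed " + server.getClass().getName());
  }
}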
@@ -112,15 +112,6 @@ public class JVMClusterUtil {
   public static void shutdown(final HMaster master,
       final List<RegionServerThread> regionservers) {
     LOG.debug("Shutting down HBase Cluster");
-    // Be careful how the hdfs shutdown thread runs in context where more than
-    // one regionserver in the mix.
-    Thread hdfsClientFinalizer = null;
-    for (JVMClusterUtil.RegionServerThread t: regionservers) {
-      Thread tt = t.getRegionServer().setHDFSShutdownThreadOnExit(null);
-      if (hdfsClientFinalizer == null && tt != null) {
-        hdfsClientFinalizer = tt;
-      }
-    }
     if (master != null) {
       master.shutdown();
     }
@@ -147,16 +138,8 @@ public class JVMClusterUtil {
         }
       }
     }
-    if (hdfsClientFinalizer != null) {
-      // Don't run the shutdown thread. Plays havoc if we try to start a
-      // minihbasecluster immediately after this one has gone down (In
-      // Filesystem, the shutdown thread is kept in a static and is created
-      // on classloading. Can only run it once).
-      // hdfsClientFinalizer.start();
-      // Threads.shutdown(hdfsClientFinalizer);
-    }
     LOG.info("Shutdown " +
       ((regionservers != null)? master.getName(): "0 masters") +
       " " + regionservers.size() + " region server(s)");
   }
 }
@@ -44,4 +44,4 @@ public class TestRegionServerOperationQueue {
   @After
   public void tearDown() throws Exception {
   }
 }