HBASE-2036 Use Configuration instead of HBaseConfiguration

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@894934 13f79535-47bb-0310-9956-ffa450edef68

commit 3339e94e8d
parent 83e58c6190
@@ -274,6 +274,7 @@ Release 0.21.0 - Unreleased
    HBASE-1956 Export HDFS read and write latency as a metric
    HBASE-2080 [EC2] Support multivolume local instance storage
    HBASE-2083 [EC2] HDFS DataNode no longer required on master
+   HBASE-2036 Use Configuration instead of HBaseConfiguration (Enis Soztutar via Stack)

  NEW FEATURES
    HBASE-1901 "General" partitioner for "hbase-48" bulk (behind the api, write
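Every hunk below applies the same migration. As a rough before-and-after sketch of affected caller code (a sketch only; the table name is illustrative, not from this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;

    public class MigrationSketch {
      public static void main(String[] args) throws Exception {
        // Before this commit, callers instantiated the subclass directly:
        //   HBaseConfiguration conf = new HBaseConfiguration();
        // After it, a plain Hadoop Configuration comes from the static
        // factory, which layers hbase-default.xml and hbase-site.xml on top.
        Configuration conf = HBaseConfiguration.create();
        // Client APIs that used to require HBaseConfiguration now take
        // Configuration, so the same object threads through everywhere.
        HTable table = new HTable(conf, "mytable"); // "mytable" is illustrative
      }
    }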
@@ -25,6 +25,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.client.HTableInterface;
@@ -43,7 +44,7 @@ public class RESTServlet extends ServletAdaptor {

   private static RESTServlet instance;

-  private transient final HBaseConfiguration conf;
+  private transient final Configuration conf;
   private transient final HTablePool pool;
   protected Map<String,Integer> maxAgeMap =
     Collections.synchronizedMap(new HashMap<String,Integer>());
@@ -64,7 +65,7 @@ public class RESTServlet extends ServletAdaptor {
    * @throws IOException
    */
   public RESTServlet() throws IOException {
-    this.conf = new HBaseConfiguration();
+    this.conf = HBaseConfiguration.create();
     this.pool = new HTablePool(conf, 10);
   }

@@ -80,7 +81,7 @@ public class RESTServlet extends ServletAdaptor {
   /**
    * @return the servlet's global HBase configuration
    */
-  protected HBaseConfiguration getConfiguration() {
+  protected Configuration getConfiguration() {
     return conf;
   }

@@ -34,6 +34,7 @@ import javax.ws.rs.core.Response.ResponseBuilder;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
@@ -59,7 +60,7 @@ public class StorageClusterVersionResource implements Constants {
     }
     try {
       RESTServlet server = RESTServlet.getInstance();
-      HBaseConfiguration hconf = server.getConfiguration();
+      Configuration hconf = server.getConfiguration();
       HBaseAdmin admin = new HBaseAdmin(hconf);
       StorageClusterVersionModel model = new StorageClusterVersionModel();
       model.setVersion(admin.getClusterStatus().getHBaseVersion());
@@ -34,10 +34,9 @@ import java.util.TreeSet;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Leases;
@@ -49,20 +48,20 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification;
 import org.apache.hadoop.hbase.client.tableindexed.IndexedTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;

 class IndexedRegion extends TransactionalRegion {

   private static final Log LOG = LogFactory.getLog(IndexedRegion.class);

-  private final HBaseConfiguration conf;
+  private final Configuration conf;
   private final IndexedTableDescriptor indexTableDescriptor;
   private Map<IndexSpecification, HTable> indexSpecToTable = new HashMap<IndexSpecification, HTable>();

   public IndexedRegion(final Path basedir, final HLog log, final FileSystem fs,
-      final HBaseConfiguration conf, final HRegionInfo regionInfo,
+      final Configuration conf, final HRegionInfo regionInfo,
       final FlushRequester flushListener, Leases trxLeases) throws IOException {
     super(basedir, log, fs, conf, regionInfo, flushListener, trxLeases);
     this.indexTableDescriptor = new IndexedTableDescriptor(regionInfo.getTableDesc());
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.tableindexed;

 import java.io.IOException;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -37,7 +38,7 @@ import org.apache.hadoop.util.Progressable;
 public class IndexedRegionServer extends TransactionalRegionServer implements
     IndexedRegionInterface {

-  public IndexedRegionServer(HBaseConfiguration conf) throws IOException {
+  public IndexedRegionServer(Configuration conf) throws IOException {
     super(conf);
   }

@@ -23,10 +23,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.regionserver.wal.LogRollListener;
  */
 class THLog extends HLog {

-  public THLog(FileSystem fs, Path dir, HBaseConfiguration conf,
+  public THLog(FileSystem fs, Path dir, Configuration conf,
       LogRollListener listener) throws IOException {
     super(fs, dir, conf, listener);
   }
@@ -35,7 +35,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -53,7 +53,7 @@ class THLogRecoveryManager {

   private final FileSystem fileSystem;
   private final HRegionInfo regionInfo;
-  private final HBaseConfiguration conf;
+  private final Configuration conf;

   /**
    * @param region
@@ -66,7 +66,7 @@ class THLogRecoveryManager {

   // For Testing
   THLogRecoveryManager(final FileSystem fileSystem,
-      final HRegionInfo regionInfo, final HBaseConfiguration conf) {
+      final HRegionInfo regionInfo, final Configuration conf) {
     this.fileSystem = fileSystem;
     this.regionInfo = regionInfo;
     this.conf = conf;
@@ -37,9 +37,9 @@ import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LeaseException;
@@ -55,12 +55,12 @@ import org.apache.hadoop.hbase.client.transactional.HBaseBackedTransactionLogger;
 import org.apache.hadoop.hbase.client.transactional.UnknownTransactionException;
 import org.apache.hadoop.hbase.ipc.TransactionalRegionInterface;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.transactional.TransactionState.Status;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.util.Progressable;

 /**
@@ -116,7 +116,7 @@ public class TransactionalRegion extends HRegion {
    * @param flushListener
    */
   public TransactionalRegion(final Path basedir, final HLog log,
-      final FileSystem fs, final HBaseConfiguration conf,
+      final FileSystem fs, final Configuration conf,
       final HRegionInfo regionInfo, final FlushRequester flushListener,
       final Leases transactionalLeases) {
     super(basedir, log, fs, conf, regionInfo, flushListener);
@@ -25,7 +25,7 @@ import java.lang.Thread.UncaughtExceptionHandler;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Leases;
@@ -71,7 +71,7 @@ public class TransactionalRegionServer extends HRegionServer implements
    * @param conf
    * @throws IOException
    */
-  public TransactionalRegionServer(final HBaseConfiguration conf)
+  public TransactionalRegionServer(final Configuration conf)
       throws IOException {
     super(conf);
     cleanOldTransactionsThread = new CleanOldTransactionsChore(this,
@@ -22,32 +22,71 @@ package org.apache.hadoop.hbase;
 import java.util.Iterator;
 import java.util.Map.Entry;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;

 /**
  * Adds HBase configuration files to a Configuration
  */
 public class HBaseConfiguration extends Configuration {
-  /** constructor */
+
+  private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class);
+
+  /**
+   * Instantiating HBaseConfiguration() is deprecated. Please use
+   * HBaseConfiguration#create() to construct a plain Configuration
+   */
+  @Deprecated
   public HBaseConfiguration() {
+    //TODO:replace with private constructor, HBaseConfiguration should not extend Configuration
     super();
-    addHbaseResources();
+    addHbaseResources(this);
+    LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use" +
+        " HBaseConfiguration#create() to construct a plain Configuration");
   }

   /**
-   * Create a clone of passed configuration.
-   * @param c Configuration to clone.
+   * Instantiating HBaseConfiguration() is deprecated. Please use
+   * HBaseConfiguration#create(conf) to construct a plain Configuration
    */
+  @Deprecated
   public HBaseConfiguration(final Configuration c) {
     this();
+    //TODO:replace with private constructor
     for (Entry<String, String>e: c) {
       set(e.getKey(), e.getValue());
     }
+    LOG.warn("instantiating HBaseConfiguration() is deprecated. Please use " +
+        "HBaseConfiguration#create(conf) to construct a plain Configuration");
   }

-  private void addHbaseResources() {
-    addResource("hbase-default.xml");
-    addResource("hbase-site.xml");
+  public static Configuration addHbaseResources(Configuration conf) {
+    conf.addResource("hbase-default.xml");
+    conf.addResource("hbase-site.xml");
+    return conf;
   }

+  /**
+   * Creates a Configuration with HBase resources
+   * @return a Configuration with HBase resources
+   */
+  public static Configuration create() {
+    Configuration conf = new Configuration();
+    return addHbaseResources(conf);
+  }
+
+  /**
+   * Creates a clone of passed configuration.
+   * @param that Configuration to clone.
+   * @return a Configuration created with the hbase-*.xml files plus
+   * the given configuration.
+   */
+  public static Configuration create(final Configuration that) {
+    Configuration conf = create();
+    for (Entry<String, String>e: that) {
+      conf.set(e.getKey(), e.getValue());
+    }
+    return conf;
+  }

   /**
@@ -57,10 +96,21 @@ public class HBaseConfiguration extends Configuration {
    * @see Configuration#iterator() How the entries are obtained.
    */
   @Override
+  @Deprecated
   public int hashCode() {
+    return hashCode(this);
+  }
+
+  /**
+   * Returns the hash code value for this HBaseConfiguration. The hash code of a
+   * Configuration is defined by the xor of the hash codes of its entries.
+   *
+   * @see Configuration#iterator() How the entries are obtained.
+   */
+  public static int hashCode(Configuration conf) {
     int hash = 0;

-    Iterator<Entry<String, String>> propertyIterator = this.iterator();
+    Iterator<Entry<String, String>> propertyIterator = conf.iterator();
     while (propertyIterator.hasNext()) {
       hash ^= propertyIterator.next().hashCode();
     }
@@ -75,7 +125,6 @@ public class HBaseConfiguration extends Configuration {
       return false;
     if (!(obj instanceof HBaseConfiguration))
       return false;
-
     HBaseConfiguration otherConf = (HBaseConfiguration) obj;
     if (size() != otherConf.size()) {
       return false;
@@ -89,9 +138,7 @@ public class HBaseConfiguration extends Configuration {
         return false;
       }
     }
-
-
     return true;
   }
-
 }
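A sketch of how the three static helpers added above compose (behavior as defined in the hunk; the override key below is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ConfigFactorySketch {
      public static void main(String[] args) {
        // create(): fresh Configuration preloaded with the HBase resources.
        Configuration fresh = HBaseConfiguration.create();

        // create(that): starts from create(), then copies every entry of the
        // argument, so caller-supplied keys override the hbase-*.xml values.
        Configuration base = new Configuration();
        base.set("hbase.client.pause", "500"); // illustrative override
        Configuration clone = HBaseConfiguration.create(base);

        // addHbaseResources(conf): mutates and returns the passed instance,
        // which the mapreduce call sites later in this commit rely on.
        Configuration mine =
            HBaseConfiguration.addHbaseResources(new Configuration());
      }
    }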
@@ -27,6 +27,7 @@ import java.util.Random;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Delete;
@@ -35,11 +36,11 @@ import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;

@@ -71,7 +72,7 @@ class HMerge implements HConstants {
    * @param tableName - Table to be compacted
    * @throws IOException
    */
-  public static void merge(HBaseConfiguration conf, FileSystem fs,
+  public static void merge(Configuration conf, FileSystem fs,
       final byte [] tableName)
   throws IOException {
     HConnection connection = HConnectionManager.getConnection(conf);
@@ -93,14 +94,14 @@ class HMerge implements HConstants {
   }

   private static abstract class Merger {
-    protected final HBaseConfiguration conf;
+    protected final Configuration conf;
     protected final FileSystem fs;
     protected final Path tabledir;
     protected final HLog hlog;
     private final long maxFilesize;


-    protected Merger(HBaseConfiguration conf, FileSystem fs,
+    protected Merger(Configuration conf, FileSystem fs,
         final byte [] tableName)
     throws IOException {
       this.conf = conf;
@@ -197,7 +198,7 @@ class HMerge implements HConstants {
     private final ResultScanner metaScanner;
     private HRegionInfo latestRegion;

-    OnlineMerger(HBaseConfiguration conf, FileSystem fs,
+    OnlineMerger(Configuration conf, FileSystem fs,
         final byte [] tableName)
     throws IOException {
       super(conf, fs, tableName);
@@ -313,7 +314,7 @@ class HMerge implements HConstants {
     private final List<HRegionInfo> metaRegions = new ArrayList<HRegionInfo>();
     private final HRegion root;

-    OfflineMerger(HBaseConfiguration conf, FileSystem fs)
+    OfflineMerger(Configuration conf, FileSystem fs)
     throws IOException {

       super(conf, fs, META_TABLE_NAME);
@@ -64,7 +64,7 @@ public class LocalHBaseCluster implements HConstants {
   public static final String LOCAL = "local";
   /** 'local:' */
   public static final String LOCAL_COLON = LOCAL + ":";
-  private final HBaseConfiguration conf;
+  private final Configuration conf;
   private final Class<? extends HRegionServer> regionServerClass;

   /**
@@ -72,7 +72,7 @@ public class LocalHBaseCluster implements HConstants {
    * @param conf
    * @throws IOException
    */
-  public LocalHBaseCluster(final HBaseConfiguration conf)
+  public LocalHBaseCluster(final Configuration conf)
   throws IOException {
     this(conf, DEFAULT_NO);
   }
@@ -85,7 +85,7 @@ public class LocalHBaseCluster implements HConstants {
    * @throws IOException
    */
   @SuppressWarnings("unchecked")
-  public LocalHBaseCluster(final HBaseConfiguration conf,
+  public LocalHBaseCluster(final Configuration conf,
       final int noRegionServers)
   throws IOException {
     this.conf = conf;
@@ -113,7 +113,7 @@ public class LocalHBaseCluster implements HConstants {
     synchronized (regionThreads) {
       HRegionServer server;
       try {
-        server = regionServerClass.getConstructor(HBaseConfiguration.class).
+        server = regionServerClass.getConstructor(Configuration.class).
           newInstance(conf);
       } catch (Exception e) {
         IOException ioe = new IOException();
@@ -324,7 +324,7 @@ public class LocalHBaseCluster implements HConstants {
    * @throws IOException
    */
   public static void main(String[] args) throws IOException {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
     cluster.startup();
     HBaseAdmin admin = new HBaseAdmin(conf);
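The reflective lookup above matches on the exact declared parameter type, so an HRegionServer subclass must now declare a Configuration constructor to stay loadable this way. A minimal sketch of the mechanism:

    import java.lang.reflect.Constructor;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;

    public class ReflectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Fails with NoSuchMethodException if the class only declares
        // a constructor taking the old HBaseConfiguration type.
        Constructor<HRegionServer> ctor =
            HRegionServer.class.getConstructor(Configuration.class);
        HRegionServer server = ctor.newInstance(conf);
      }
    }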
@@ -25,6 +25,7 @@ import java.util.NavigableMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -53,7 +54,7 @@ public class HBaseAdmin {
   private final Log LOG = LogFactory.getLog(this.getClass().getName());
//  private final HConnection connection;
   final HConnection connection;
-  private volatile HBaseConfiguration conf;
+  private volatile Configuration conf;
   private final long pause;
   private final int numRetries;
   private volatile HMasterInterface master;
@@ -64,7 +65,7 @@ public class HBaseAdmin {
    * @param conf Configuration object
    * @throws MasterNotRunningException
    */
-  public HBaseAdmin(HBaseConfiguration conf) throws MasterNotRunningException {
+  public HBaseAdmin(Configuration conf) throws MasterNotRunningException {
     this.connection = HConnectionManager.getConnection(conf);
     this.conf = conf;
     this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
@@ -852,9 +853,9 @@ public class HBaseAdmin {
    * @param conf
    * @throws MasterNotRunningException
    */
-  public static void checkHBaseAvailable(HBaseConfiguration conf)
+  public static void checkHBaseAvailable(Configuration conf)
   throws MasterNotRunningException {
-    HBaseConfiguration copyOfConf = new HBaseConfiguration(conf);
+    Configuration copyOfConf = HBaseConfiguration.create(conf);
     copyOfConf.setInt("hbase.client.retries.number", 1);
     new HBaseAdmin(copyOfConf);
   }
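checkHBaseAvailable above uses create(conf) as a defensive copy, so its one-retry override never leaks into the caller's Configuration. The same pattern in user code (a sketch; the key name comes from the hunk, the rest is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class DefensiveCopySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Clone before mutating; the original stays untouched, which also
        // matters for the hash-keyed connection cache changed below.
        Configuration probe = HBaseConfiguration.create(conf);
        probe.setInt("hbase.client.retries.number", 1);
      }
    }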
@@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicBoolean;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -92,11 +93,11 @@ public class HConnectionManager implements HConstants {
   // synchronized access to them. We set instances to 31. The zk default max
   // connections is 30 so should run into zk issues before hit this value of 31.
   private static
-  final Map<HBaseConfiguration, TableServers> HBASE_INSTANCES =
-    new LinkedHashMap<HBaseConfiguration, TableServers>
+  final Map<Integer, TableServers> HBASE_INSTANCES =
+    new LinkedHashMap<Integer, TableServers>
       ((int) (MAX_CACHED_HBASE_INSTANCES/0.75F)+1, 0.75F, true) {
       @Override
-      protected boolean removeEldestEntry(Map.Entry<HBaseConfiguration, TableServers> eldest) {
+      protected boolean removeEldestEntry(Map.Entry<Integer, TableServers> eldest) {
         return size() > MAX_CACHED_HBASE_INSTANCES;
       }
   };
@@ -110,13 +111,14 @@ public class HConnectionManager implements HConstants {
    * @param conf
    * @return HConnection object for the instance specified by the configuration
    */
-  public static HConnection getConnection(HBaseConfiguration conf) {
+  public static HConnection getConnection(Configuration conf) {
     TableServers connection;
+    Integer key = HBaseConfiguration.hashCode(conf);
     synchronized (HBASE_INSTANCES) {
-      connection = HBASE_INSTANCES.get(conf);
+      connection = HBASE_INSTANCES.get(key);
       if (connection == null) {
         connection = new TableServers(conf);
-        HBASE_INSTANCES.put(conf, connection);
+        HBASE_INSTANCES.put(key, connection);
       }
     }
     return connection;
@@ -127,7 +129,7 @@ public class HConnectionManager implements HConstants {
    * @param conf
    * @param stopProxy
    */
-  public static void deleteConnectionInfo(HBaseConfiguration conf,
+  public static void deleteConnectionInfo(Configuration conf,
       boolean stopProxy) {
     synchronized (HBASE_INSTANCES) {
       TableServers t = HBASE_INSTANCES.remove(conf);
@@ -165,7 +167,7 @@ public class HConnectionManager implements HConstants {
    * @throws IOException
    */
   public static synchronized ClientZKWatcher getClientZooKeeperWatcher(
-      HBaseConfiguration conf) throws IOException {
+      Configuration conf) throws IOException {
     if (!ZK_WRAPPERS.containsKey(conf.get(HConstants.ZOOKEEPER_QUORUM))) {
       ZK_WRAPPERS.put(conf.get(HConstants.ZOOKEEPER_QUORUM),
           new ClientZKWatcher(conf));
@@ -182,14 +184,14 @@ public class HConnectionManager implements HConstants {

     static final Log LOG = LogFactory.getLog(ClientZKWatcher.class);
     private ZooKeeperWrapper zooKeeperWrapper;
-    private HBaseConfiguration conf;
+    private Configuration conf;

     /**
      * Takes a configuration to pass it to ZKW but won't instantiate it
      * @param conf
      * @throws IOException
      */
-    public ClientZKWatcher(HBaseConfiguration conf) {
+    public ClientZKWatcher(Configuration conf) {
       this.conf = conf;
     }

@@ -254,7 +256,7 @@ public class HConnectionManager implements HConstants {
     private final Object metaRegionLock = new Object();
     private final Object userRegionLock = new Object();

-    private volatile HBaseConfiguration conf;
+    private volatile Configuration conf;

     // Known region HServerAddress.toString() -> HRegionInterface
     private final Map<String, HRegionInterface> servers =
@@ -272,7 +274,7 @@ public class HConnectionManager implements HConstants {
     * @param conf Configuration object
     */
    @SuppressWarnings("unchecked")
-    public TableServers(HBaseConfiguration conf) {
+    public TableServers(Configuration conf) {
      this.conf = conf;

      String serverClassName =
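Because HBASE_INSTANCES is now keyed by HBaseConfiguration.hashCode(conf) rather than by the Configuration object itself, two instances with identical entries resolve to the same cached connection. A sketch of that consequence (ignoring hash collisions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;

    public class ConnectionCacheSketch {
      public static void main(String[] args) {
        Configuration a = HBaseConfiguration.create();
        Configuration b = HBaseConfiguration.create();
        // Distinct objects, identical entry sets: same Integer key.
        assert HBaseConfiguration.hashCode(a) == HBaseConfiguration.hashCode(b);
        HConnection c1 = HConnectionManager.getConnection(a);
        HConnection c2 = HConnectionManager.getConnection(b);
        assert c1 == c2; // one connection per distinct configuration content
      }
    }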
@@ -29,6 +29,7 @@ import java.util.TreeMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -57,7 +58,7 @@ public class HTable implements HTableInterface {
   private final HConnection connection;
   private final byte [] tableName;
   protected final int scannerTimeout;
-  private volatile HBaseConfiguration configuration;
+  private volatile Configuration configuration;
   private final ArrayList<Put> writeBuffer = new ArrayList<Put>();
   private long writeBufferSize;
   private boolean autoFlush;
@@ -73,7 +74,7 @@ public class HTable implements HTableInterface {
    */
   public HTable(final String tableName)
   throws IOException {
-    this(new HBaseConfiguration(), Bytes.toBytes(tableName));
+    this(HBaseConfiguration.create(), Bytes.toBytes(tableName));
   }

   /**
@@ -84,7 +85,7 @@ public class HTable implements HTableInterface {
    */
   public HTable(final byte [] tableName)
   throws IOException {
-    this(new HBaseConfiguration(), tableName);
+    this(HBaseConfiguration.create(), tableName);
   }

   /**
@@ -94,7 +95,7 @@ public class HTable implements HTableInterface {
    * @param tableName name of the table
    * @throws IOException
    */
-  public HTable(HBaseConfiguration conf, final String tableName)
+  public HTable(Configuration conf, final String tableName)
   throws IOException {
     this(conf, Bytes.toBytes(tableName));
   }
@@ -106,7 +107,7 @@ public class HTable implements HTableInterface {
    * @param tableName name of the table
    * @throws IOException
    */
-  public HTable(HBaseConfiguration conf, final byte [] tableName)
+  public HTable(Configuration conf, final byte [] tableName)
   throws IOException {
     this.tableName = tableName;
     if (conf == null) {
@@ -140,7 +141,7 @@ public class HTable implements HTableInterface {
    * @throws IOException
    */
   public static boolean isTableEnabled(byte[] tableName) throws IOException {
-    return isTableEnabled(new HBaseConfiguration(), tableName);
+    return isTableEnabled(HBaseConfiguration.create(), tableName);
   }

   /**
@@ -149,7 +150,7 @@ public class HTable implements HTableInterface {
    * @return true if table is on-line
    * @throws IOException
    */
-  public static boolean isTableEnabled(HBaseConfiguration conf, String tableName)
+  public static boolean isTableEnabled(Configuration conf, String tableName)
   throws IOException {
     return isTableEnabled(conf, Bytes.toBytes(tableName));
   }
@@ -160,7 +161,7 @@ public class HTable implements HTableInterface {
    * @return true if table is on-line
    * @throws IOException
    */
-  public static boolean isTableEnabled(HBaseConfiguration conf, byte[] tableName)
+  public static boolean isTableEnabled(Configuration conf, byte[] tableName)
   throws IOException {
     return HConnectionManager.getConnection(conf).isTableEnabled(tableName);
   }
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.client;

 import java.io.IOException;

-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;

 /**
  * Factory for creating HTable instances.
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 public class HTableFactory implements HTableInterfaceFactory {

   @Override
-  public HTableInterface createHTableInterface(HBaseConfiguration config,
+  public HTableInterface createHTableInterface(Configuration config,
       byte[] tableName) {
     try {
       return new HTable(config, tableName);
@@ -19,7 +19,8 @@
  */
 package org.apache.hadoop.hbase.client;

-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
+

 /**
  * Defines methods to create new HTableInterface.
@@ -35,5 +36,5 @@ public interface HTableInterfaceFactory {
    * @param tableName name of the HBase table.
    * @return HTableInterface instance.
    */
-  HTableInterface createHTableInterface(HBaseConfiguration config, byte[] tableName);
+  HTableInterface createHTableInterface(Configuration config, byte[] tableName);
 }
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.Map;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -41,7 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 public class HTablePool {
   private final Map<String, LinkedList<HTableInterface>> tables =
       Collections.synchronizedMap(new HashMap<String, LinkedList<HTableInterface>>());
-  private final HBaseConfiguration config;
+  private final Configuration config;
   private final int maxSize;
   private HTableInterfaceFactory tableFactory = new HTableFactory();

@@ -49,7 +50,7 @@ public class HTablePool {
    * Default Constructor. Default HBaseConfiguration and no limit on pool size.
    */
   public HTablePool() {
-    this(new HBaseConfiguration(), Integer.MAX_VALUE);
+    this(HBaseConfiguration.create(), Integer.MAX_VALUE);
   }

   /**
@@ -57,12 +58,12 @@ public class HTablePool {
    * @param config configuration
    * @param maxSize maximum number of references to keep for each table
    */
-  public HTablePool(HBaseConfiguration config, int maxSize) {
+  public HTablePool(Configuration config, int maxSize) {
     this.config = config;
     this.maxSize = maxSize;
   }

-  public HTablePool(HBaseConfiguration config, int maxSize, HTableInterfaceFactory tableFactory) {
+  public HTablePool(Configuration config, int maxSize, HTableInterfaceFactory tableFactory) {
     this.config = config;
     this.maxSize = maxSize;
     this.tableFactory = tableFactory;
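With the constructor change above, a pool is built from any plain Configuration. A usage sketch (pool size and table name are illustrative; getTable is the pool's existing lookup method):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.HTablePool;

    public class PoolSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keep at most 10 cached references per table.
        HTablePool pool = new HTablePool(conf, 10);
        HTableInterface table = pool.getTable("mytable"); // illustrative name
      }
    }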
@@ -2,7 +2,7 @@ package org.apache.hadoop.hbase.client;

 import java.io.IOException;

-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -22,7 +22,7 @@ class MetaScanner implements HConstants {
    * @param visitor A custom visitor
    * @throws IOException
    */
-  public static void metaScan(HBaseConfiguration configuration,
+  public static void metaScan(Configuration configuration,
       MetaScannerVisitor visitor)
   throws IOException {
     metaScan(configuration, visitor, EMPTY_START_ROW);
@@ -37,7 +37,7 @@ class MetaScanner implements HConstants {
    * @param tableName
    * @throws IOException
    */
-  public static void metaScan(HBaseConfiguration configuration,
+  public static void metaScan(Configuration configuration,
       MetaScannerVisitor visitor, byte[] tableName)
   throws IOException {
     HConnection connection = HConnectionManager.getConnection(configuration);
@@ -20,7 +20,8 @@

 package org.apache.hadoop.hbase.client;

-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
+

 /**
  * Used by server processes to expose HServerConnection method
@@ -38,7 +39,7 @@ public class ServerConnectionManager extends HConnectionManager {
    * @param conf
    * @return HConnection object for the instance specified by the configuration
    */
-  public static ServerConnection getConnection(HBaseConfiguration conf) {
+  public static ServerConnection getConnection(Configuration conf) {
     return (ServerConnection) HConnectionManager.getConnection(conf);
   }
 }
@@ -203,7 +203,7 @@ public class FilterList implements Filter {
   }

   public void readFields(final DataInput in) throws IOException {
-    Configuration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     byte opByte = in.readByte();
     operator = Operator.values()[opByte];
     int size = in.readInt();
@@ -217,7 +217,7 @@ public class FilterList implements Filter {
   }

   public void write(final DataOutput out) throws IOException {
-    Configuration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     out.writeByte(operator.ordinal());
     out.writeInt(filters.size());
     for (Filter filter : filters) {
@@ -38,19 +38,20 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.io.HbaseMapWritable;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.RawComparator;
@@ -1706,7 +1707,7 @@ public class HFile {
     boolean checkRow = cmd.hasOption("k");
     boolean checkFamily = cmd.hasOption("a");
     // get configuration, file system and get list of files
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     conf.set("fs.defaultFS",
       conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR));
     FileSystem fs = FileSystem.get(conf);
@@ -190,7 +190,7 @@ public class BuildTableIndex {
    * @throws Exception When running the job fails.
    */
   public static void main(String[] args) throws Exception {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     String[] otherArgs =
       new GenericOptionsParser(conf, args).getRemainingArgs();
     Job job = createSubmittableJob(conf, otherArgs);
@@ -119,7 +119,7 @@ public class Export {
    * @throws Exception When running the job fails.
    */
   public static void main(String[] args) throws Exception {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
     if (otherArgs.length < 2) {
       usage("Wrong number of arguments: " + otherArgs.length);
@@ -113,7 +113,8 @@ implements Configurable {
   public void setConf(Configuration configuration) {
     this.conf = configuration;
     try {
-      this.table = new HTable(new HBaseConfiguration(conf),
+      HBaseConfiguration.addHbaseResources(conf);
+      this.table = new HTable(this.conf,
         configuration.get(TableOutputFormat.OUTPUT_TABLE));
     } catch (IOException e) {
       LOG.error(e);
@@ -114,7 +114,7 @@ public class Import {
    * @throws Exception When running the job fails.
    */
   public static void main(String[] args) throws Exception {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
     if (otherArgs.length < 2) {
       usage("Wrong number of arguments: " + otherArgs.length);
@@ -69,22 +69,22 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
   RecordWriter<ImmutableBytesWritable, Writable> {
     private static final Log LOG = LogFactory.getLog(MultiTableRecordWriter.class);
     Map<ImmutableBytesWritable, HTable> tables;
-    HBaseConfiguration config;
+    Configuration conf;
     boolean useWriteAheadLogging;

     /**
-     * @param config
+     * @param conf
      *          HBaseConfiguration to be used
      * @param useWriteAheadLogging
      *          whether to use write ahead logging. This can be turned off (
      *          <tt>false</tt>) to improve performance when bulk loading data.
      */
-    public MultiTableRecordWriter(HBaseConfiguration config,
+    public MultiTableRecordWriter(Configuration conf,
         boolean useWriteAheadLogging) {
       LOG.debug("Created new MultiTableRecordReader with WAL "
           + (useWriteAheadLogging ? "on" : "off"));
       this.tables = new HashMap<ImmutableBytesWritable, HTable>();
-      this.config = config;
+      this.conf = conf;
       this.useWriteAheadLogging = useWriteAheadLogging;
     }

@@ -98,7 +98,7 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
     HTable getTable(ImmutableBytesWritable tableName) throws IOException {
       if (!tables.containsKey(tableName)) {
         LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing");
-        HTable table = new HTable(config, tableName.get());
+        HTable table = new HTable(conf, tableName.get());
         table.setAutoFlush(false);
         tables.put(tableName, table);
       }
@@ -155,9 +155,9 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
   @Override
   public RecordWriter<ImmutableBytesWritable, Writable> getRecordWriter(TaskAttemptContext context)
       throws IOException, InterruptedException {
-    Configuration configuration = context.getConfiguration();
-    return new MultiTableRecordWriter(new HBaseConfiguration(configuration),
-        configuration.getBoolean(WAL_PROPERTY, WAL_ON));
+    Configuration conf = context.getConfiguration();
+    return new MultiTableRecordWriter(HBaseConfiguration.create(conf),
+        conf.getBoolean(WAL_PROPERTY, WAL_ON));
   }

 }
@@ -123,7 +123,7 @@ public class RowCounter {
    * @throws Exception When running the job fails.
    */
   public static void main(String[] args) throws Exception {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
     if (otherArgs.length < 1) {
       System.err.println("ERROR: Wrong number of parameters: " + args.length);
@@ -70,7 +70,7 @@ implements Configurable {
     this.conf = configuration;
     String tableName = conf.get(INPUT_TABLE);
     try {
-      setHTable(new HTable(new HBaseConfiguration(conf), tableName));
+      setHTable(new HTable(HBaseConfiguration.create(conf), tableName));
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
     }
@@ -129,9 +129,9 @@ public class TableMapReduceUtil {
     job.setOutputKeyClass(ImmutableBytesWritable.class);
     job.setOutputValueClass(Writable.class);
     if (partitioner == HRegionPartitioner.class) {
+      HBaseConfiguration.addHbaseResources(job.getConfiguration());
       job.setPartitionerClass(HRegionPartitioner.class);
-      HTable outputTable = new HTable(new HBaseConfiguration(
-          job.getConfiguration()), table);
+      HTable outputTable = new HTable(job.getConfiguration(), table);
       int regions = outputTable.getRegionsInfo().size();
       if (job.getNumReduceTasks() > regions) {
         job.setNumReduceTasks(outputTable.getRegionsInfo().size());
@@ -151,8 +151,7 @@ public class TableMapReduceUtil {
    */
   public static void limitNumReduceTasks(String table, Job job)
   throws IOException {
-    HTable outputTable = new HTable(new HBaseConfiguration(
-        job.getConfiguration()), table);
+    HTable outputTable = new HTable(job.getConfiguration(), table);
     int regions = outputTable.getRegionsInfo().size();
     if (job.getNumReduceTasks() > regions)
       job.setNumReduceTasks(regions);
@@ -168,8 +167,7 @@ public class TableMapReduceUtil {
    */
   public static void setNumReduceTasks(String table, Job job)
   throws IOException {
-    HTable outputTable = new HTable(new HBaseConfiguration(
-        job.getConfiguration()), table);
+    HTable outputTable = new HTable(job.getConfiguration(), table);
     int regions = outputTable.getRegionsInfo().size();
     job.setNumReduceTasks(regions);
   }
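Replacing new HBaseConfiguration(job.getConfiguration()) with an in-place addHbaseResources call keeps the job's own Configuration (the object Hadoop actually ships with the job) as the single copy. A sketch of the resulting setup pattern (job and table names illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.mapreduce.Job;

    public class JobSetupSketch {
      public static void main(String[] args) throws Exception {
        Job job = new Job(HBaseConfiguration.create(), "sketch");
        // Add hbase-default.xml/hbase-site.xml to the job's Configuration
        // itself instead of cloning it into a throwaway HBaseConfiguration.
        HBaseConfiguration.addHbaseResources(job.getConfiguration());
        HTable outputTable = new HTable(job.getConfiguration(), "mytable");
        int regions = outputTable.getRegionsInfo().size();
        if (job.getNumReduceTasks() > regions) {
          job.setNumReduceTasks(regions);
        }
      }
    }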
@@ -106,6 +106,7 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Writable> {
    * @throws InterruptedException When the jobs is cancelled.
    * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
    */
+  @Override
   public RecordWriter<KEY, Writable> getRecordWriter(
       TaskAttemptContext context)
   throws IOException, InterruptedException {
@@ -113,7 +114,8 @@ public class TableOutputFormat<KEY> extends OutputFormat<KEY, Writable> {
     String tableName = context.getConfiguration().get(OUTPUT_TABLE);
     HTable table = null;
     try {
-      table = new HTable(new HBaseConfiguration(context.getConfiguration()),
+      HBaseConfiguration.addHbaseResources(context.getConfiguration());
+      table = new HTable(context.getConfiguration(),
         tableName);
     } catch(IOException e) {
       LOG.error(e);
@@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicBoolean;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -117,7 +118,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   // TODO: Is this separate flag necessary?
   private final AtomicBoolean shutdownRequested = new AtomicBoolean(false);

-  private final HBaseConfiguration conf;
+  private final Configuration conf;
   private final Path rootdir;
   private InfoServer infoServer;
   private final int threadWakeFrequency;
@@ -155,7 +156,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * @param conf configuration
    * @throws IOException
    */
-  public HMaster(HBaseConfiguration conf) throws IOException {
+  public HMaster(Configuration conf) throws IOException {
     this.conf = conf;
     // Set filesystem to be that of this.rootdir else we get complaints about
     // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
@@ -208,7 +209,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * needed populating the directory with necessary bootup files).
    * @throws IOException
    */
-  private static Path checkRootDir(final Path rd, final HBaseConfiguration c,
+  private static Path checkRootDir(final Path rd, final Configuration c,
     final FileSystem fs)
   throws IOException {
     // If FS is in safe mode wait till out of it.
@@ -227,7 +228,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     return rd;
   }

-  private static void bootstrap(final Path rd, final HBaseConfiguration c)
+  private static void bootstrap(final Path rd, final Configuration c)
   throws IOException {
     LOG.info("BOOTSTRAP: creating ROOT and first META regions");
     try {
@@ -269,7 +270,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * @return This master's address.
    * @throws UnknownHostException
    */
-  private static String getMyAddress(final HBaseConfiguration c)
+  private static String getMyAddress(final Configuration c)
   throws UnknownHostException {
     // Find out our address up in DNS.
     String s = DNS.getDefaultHost(c.get("hbase.master.dns.interface","default"),
@@ -332,7 +333,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   /**
    * @return Return configuration being used by this server.
    */
-  public HBaseConfiguration getConfiguration() {
+  public Configuration getConfiguration() {
     return this.conf;
   }

@@ -1170,7 +1171,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     if (args.length < 1) {
       printUsageAndExit();
     }
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
     // Process command-line args.
     for (String cmd: args) {

@@ -38,10 +38,10 @@ import java.util.concurrent.locks.ReentrantLock;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -128,7 +128,7 @@ public class RegionManager implements HConstants {
   private final int zooKeeperPause;

   RegionManager(HMaster master) {
-    HBaseConfiguration conf = master.getConfiguration();
+    Configuration conf = master.getConfiguration();

     this.master = master;
     this.maxAssignInOneGo = conf.getInt("hbase.regions.percheckin", 10);
@@ -1320,7 +1320,7 @@ public class RegionManager implements HConstants {
     private float slop; // hbase.regions.slop
     private final int maxRegToClose; // hbase.regions.close.max

-    LoadBalancer(HBaseConfiguration conf) {
+    LoadBalancer(Configuration conf) {
       this.slop = conf.getFloat("hbase.regions.slop", (float)0.1);
       if (this.slop <= 0) this.slop = 1;
       //maxRegToClose to constrain balance closing per one iteration
@@ -33,8 +33,8 @@ import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -104,6 +104,7 @@ public class ServerManager implements HConstants {
       super(period, stop);
     }

+    @Override
     protected void chore() {
       int numServers = serverAddressToServerInfo.size();
       int numDeadServers = deadServers.size();
@@ -134,7 +135,7 @@ public class ServerManager implements HConstants {
    */
   public ServerManager(HMaster master) {
     this.master = master;
-    HBaseConfiguration c = master.getConfiguration();
+    Configuration c = master.getConfiguration();
     this.nobalancingCount = c.getInt("hbase.regions.nobalancing.count", 4);
     int metaRescanInterval = c.getInt("hbase.master.meta.thread.rescanfrequency",
       60 * 1000);
@@ -28,7 +28,7 @@ import java.util.concurrent.locks.ReentrantLock;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
@@ -49,7 +49,7 @@ class CompactSplitThread extends Thread implements HConstants {
   private final ReentrantLock lock = new ReentrantLock();

   private final HRegionServer server;
-  private final HBaseConfiguration conf;
+  private final Configuration conf;

   private final BlockingQueue<HRegion> compactionQueue =
     new LinkedBlockingQueue<HRegion>();
@@ -36,6 +36,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -48,7 +49,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -135,7 +136,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
   final Path basedir;
   final HLog log;
   final FileSystem fs;
-  final HBaseConfiguration conf;
+  final Configuration conf;
   final HRegionInfo regionInfo;
   final Path regiondir;
   private final Path regionCompactionDir;
@@ -248,7 +249,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
    * making progress to master -- otherwise master might think region deploy
    * failed. Can be null.
    */
-  public HRegion(Path basedir, HLog log, FileSystem fs, HBaseConfiguration conf,
+  public HRegion(Path basedir, HLog log, FileSystem fs, Configuration conf,
       HRegionInfo regionInfo, FlushRequester flushListener) {
     this.basedir = basedir;
     this.comparator = regionInfo.getComparator();
@@ -538,7 +539,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
   }

   /** @return Configuration object */
-  public HBaseConfiguration getConf() {
+  public Configuration getConf() {
     return this.conf;
   }

@@ -1851,7 +1852,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
    * @throws IOException
    */
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-    final HBaseConfiguration conf)
+    final Configuration conf)
   throws IOException {
     Path tableDir =
       HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName());
@@ -1879,7 +1880,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
    * @throws IOException
    */
   public static HRegion openHRegion(final HRegionInfo info, final Path rootDir,
-    final HLog log, final HBaseConfiguration conf)
+    final HLog log, final Configuration conf)
   throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Opening region: " + info);
@@ -2124,7 +2125,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
       listPaths(fs, b.getRegionDir());
     }

-    HBaseConfiguration conf = a.getConf();
+    Configuration conf = a.getConf();
     HTableDescriptor tabledesc = a.getTableDesc();
     HLog log = a.getLog();
     Path basedir = a.getBaseDir();
@@ -2439,7 +2440,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
    * @throws IOException
    */
   private static void processTable(final FileSystem fs, final Path p,
-      final HLog log, final HBaseConfiguration c,
+      final HLog log, final Configuration c,
       final boolean majorCompact)
   throws IOException {
     HRegion region = null;
@@ -2528,7 +2529,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
       majorCompact = true;
     }
     Path tableDir = new Path(args[0]);
-    HBaseConfiguration c = new HBaseConfiguration();
+    Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
     Path logdir = new Path(c.get("hbase.tmp.dir"),
       "hlog" + tableDir.getName() + System.currentTimeMillis());
@@ -134,7 +134,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
   protected volatile boolean fsOk;

   protected HServerInfo serverInfo;
-  protected final HBaseConfiguration conf;
+  protected final Configuration conf;

   private final ServerConnection connection;
   protected final AtomicBoolean haveRootRegion = new AtomicBoolean(false);
@@ -233,7 +233,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
   * @param conf
   * @throws IOException
   */
-  public HRegionServer(HBaseConfiguration conf) throws IOException {
+  public HRegionServer(Configuration conf) throws IOException {
    machineName = DNS.getDefaultHost(
        conf.get("hbase.regionserver.dns.interface","default"),
        conf.get("hbase.regionserver.dns.nameserver","default"));
@@ -2044,7 +2044,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
   *
   * @return the configuration
   */
-  public HBaseConfiguration getConfiguration() {
+  public Configuration getConfiguration() {
    return conf;
  }

@@ -2293,7 +2293,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
    if (args.length < 1) {
      printUsageAndExit();
    }
-    Configuration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();

    // Process command-line args. TODO: Better cmd-line processing
    // (but hopefully something not as painful as cli options).
@@ -2386,7 +2386,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
   * @param args
   */
  public static void main(String [] args) {
-    Configuration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
    @SuppressWarnings("unchecked")
    Class<? extends HRegionServer> regionServerClass =
      (Class<? extends HRegionServer>) conf.getClass(HConstants.REGION_SERVER_IMPL,

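With the constructor loosened as above, a region server can be started from any Hadoop Configuration. A hedged sketch (the Runnable startup path is an assumption based on HRegionServer's class declaration, not code from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;

    public class StartRegionServerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Plain Configuration setters suffice; no cast to HBaseConfiguration.
        conf.setInt("hbase.regionserver.handler.count", 25);
        HRegionServer server = new HRegionServer(conf); // patched constructor
        new Thread(server).start(); // HRegionServer implements Runnable
      }
    }
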
@@ -32,8 +32,8 @@ import java.util.concurrent.locks.ReentrantLock;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -75,7 +75,7 @@ class MemStoreFlusher extends Thread implements FlushRequester {
   * @param conf
   * @param server
   */
-  public MemStoreFlusher(final HBaseConfiguration conf,
+  public MemStoreFlusher(final Configuration conf,
      final HRegionServer server) {
    super();
    this.server = server;
@@ -116,7 +116,7 @@ class MemStoreFlusher extends Thread implements FlushRequester {
   * @return Limit.
   */
  static long globalMemStoreLimit(final long max,
-      final float defaultLimit, final String key, final HBaseConfiguration c) {
+      final float defaultLimit, final String key, final Configuration c) {
    float limit = c.getFloat(key, defaultLimit);
    return getMemStoreLimit(max, limit, defaultLimit);
  }

@@ -37,17 +35,15 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
@@ -56,6 +54,8 @@ import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -99,7 +99,7 @@ public class Store implements HConstants, HeapSize {
   private final HRegion region;
   private final HColumnDescriptor family;
   final FileSystem fs;
-  private final HBaseConfiguration conf;
+  private final Configuration conf;
   // ttl in milliseconds.
   protected long ttl;
   private long majorCompactionTime;
@@ -159,7 +159,7 @@ public class Store implements HConstants, HeapSize {
   * @throws IOException
   */
  protected Store(Path basedir, HRegion region, HColumnDescriptor family,
-    FileSystem fs, Path reconstructionLog, HBaseConfiguration conf,
+    FileSystem fs, Path reconstructionLog, Configuration conf,
    final Progressable reporter)
  throws IOException {
    HRegionInfo info = region.regionInfo;

@@ -31,9 +31,9 @@ import java.util.regex.Pattern;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.HalfHFileReader;
@@ -102,7 +102,7 @@ public class StoreFile implements HConstants {

   // Used making file ids.
   private final static Random rand = new Random();
-  private final HBaseConfiguration conf;
+  private final Configuration conf;

   /**
    * Constructor, loads a reader and it's indices, etc. May allocate a
@@ -115,7 +115,7 @@ public class StoreFile implements HConstants {
   * @throws IOException When opening the reader fails.
   */
  StoreFile(final FileSystem fs, final Path p, final boolean blockcache,
-      final HBaseConfiguration conf, final boolean inMemory)
+      final Configuration conf, final boolean inMemory)
  throws IOException {
    this.conf = conf;
    this.fs = fs;
@@ -224,7 +224,7 @@ public class StoreFile implements HConstants {
   * @param conf The current configuration.
   * @return The block cache or <code>null</code>.
   */
-  public static synchronized BlockCache getBlockCache(HBaseConfiguration conf) {
+  public static synchronized BlockCache getBlockCache(Configuration conf) {
    if (hfileBlockCache != null) return hfileBlockCache;

    float cachePercentage = conf.getFloat(HFILE_CACHE_SIZE_KEY, 0.0f);

@@ -32,15 +32,15 @@ import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.concurrent.locks.Condition;
 import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -248,7 +248,7 @@ public class HLog implements HConstants, Syncable {
   * @param listener
   * @throws IOException
   */
-  public HLog(final FileSystem fs, final Path dir, final HBaseConfiguration conf,
+  public HLog(final FileSystem fs, final Path dir, final Configuration conf,
    final LogRollListener listener)
  throws IOException {
    super();
@@ -389,7 +389,7 @@ public class HLog implements HConstants, Syncable {
   */
  @SuppressWarnings("unchecked")
  public static Reader getReader(final FileSystem fs,
-    final Path path, HBaseConfiguration conf)
+    final Path path, Configuration conf)
  throws IOException {
    try {
      Class c = Class.forName(conf.get("hbase.regionserver.hlog.reader.impl",
@@ -413,7 +413,7 @@ public class HLog implements HConstants, Syncable {
   */
  @SuppressWarnings("unchecked")
  public static Writer createWriter(final FileSystem fs,
-      final Path path, HBaseConfiguration conf) throws IOException {
+      final Path path, Configuration conf) throws IOException {
    try {
      Class c = Class.forName(conf.get("hbase.regionserver.hlog.writer.impl",
        SequenceFileLogWriter.class.getCanonicalName()));
@@ -701,6 +701,7 @@ public class HLog implements HConstants, Syncable {
      this.optionalFlushInterval = optionalFlushInterval;
    }

+    @Override
    public void run() {
      try {
        lock.lock();
@@ -955,7 +956,7 @@ public class HLog implements HConstants, Syncable {
   * @throws IOException
   */
  public static List<Path> splitLog(final Path rootDir, final Path srcDir,
-    final FileSystem fs, final HBaseConfiguration conf)
+    final FileSystem fs, final Configuration conf)
  throws IOException {
    long millis = System.currentTimeMillis();
    List<Path> splits = null;
@@ -1022,7 +1023,7 @@ public class HLog implements HConstants, Syncable {
   */
  private static List<Path> splitLog(final Path rootDir,
    final FileStatus [] logfiles, final FileSystem fs,
-    final HBaseConfiguration conf)
+    final Configuration conf)
  throws IOException {
    final Map<byte [], WriterAndPath> logWriters =
      new TreeMap<byte [], WriterAndPath>(Bytes.BYTES_COMPARATOR);
@@ -1244,6 +1245,7 @@ public class HLog implements HConstants, Syncable {
      return key;
    }

+    @Override
    public String toString() {
      return this.key + "=" + this.edit;
    }
@@ -1315,7 +1317,7 @@ public class HLog implements HConstants, Syncable {
        System.exit(-1);
      }
    }
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path baseDir = new Path(conf.get(HBASE_DIR));
    for (int i = 1; i < args.length; i++) {

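The main() hunk above shows the bootstrap idiom shared by the command-line entry points in this patch: create the Configuration once via the factory, then derive the filesystem and paths from it. A standalone sketch of that idiom (the "hbase.rootdir" key stands in for the HBASE_DIR constant):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LogToolSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf); // fs.defaultFS read from conf
        Path baseDir = new Path(conf.get("hbase.rootdir"));
        System.out.println("base dir: " + fs.makeQualified(baseDir));
      }
    }
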
@@ -27,6 +27,7 @@ import java.util.TreeMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -76,7 +77,7 @@ public class ThriftServer {
   * HBase client API primarily defined in the HBaseAdmin and HTable objects.
   */
  public static class HBaseHandler implements Hbase.Iface {
-    protected HBaseConfiguration conf = new HBaseConfiguration();
+    protected Configuration conf;
    protected HBaseAdmin admin = null;
    protected final Log LOG = LogFactory.getLog(this.getClass().getName());

@@ -155,7 +156,7 @@ public class ThriftServer {
     * @throws MasterNotRunningException
     */
    HBaseHandler() throws MasterNotRunningException {
-      conf = new HBaseConfiguration();
+      conf = HBaseConfiguration.create();
      admin = new HBaseAdmin(conf);
      scannerMap = new HashMap<Integer, ResultScanner>();
    }

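Note the field change above: the handler's configuration is no longer built eagerly at field-initialization time but assigned in the constructor. A sketch of the resulting shape (hypothetical class, abridged from the HBaseHandler hunks):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HandlerSketch {
      // Declared as the supertype; created once the handler is constructed.
      protected Configuration conf;

      HandlerSketch() {
        conf = HBaseConfiguration.create();
      }

      public static void main(String[] args) {
        System.out.println(new HandlerSketch().conf.get("hbase.zookeeper.quorum"));
      }
    }
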
@@ -26,13 +26,13 @@ import java.net.URISyntaxException;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
@@ -242,7 +242,7 @@ public class FSUtils {
   * @param wait Sleep between retries
   * @throws IOException
   */
-  public static void waitOnSafeMode(final HBaseConfiguration conf,
+  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
@@ -295,7 +295,7 @@ public class FSUtils {
   * configuration as a Path.
   * @throws IOException
   */
-  public static Path getRootDir(final HBaseConfiguration c) throws IOException {
+  public static Path getRootDir(final Configuration c) throws IOException {
    return new Path(c.get(HConstants.HBASE_DIR));
  }

@@ -25,6 +25,7 @@ import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -36,8 +37,8 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -49,7 +50,6 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class Merge extends Configured implements Tool {
   static final Log LOG = LogFactory.getLog(Merge.class);
-  private final HBaseConfiguration conf;
   private Path rootdir;
   private volatile MetaUtils utils;
   private byte [] tableName;  // Name of table
@@ -60,25 +60,24 @@ public class Merge extends Configured implements Tool {

   /** default constructor */
   public Merge() {
-    this(new HBaseConfiguration());
+    super();
   }

   /**
    * @param conf
    */
-  public Merge(HBaseConfiguration conf) {
-    super(conf);
-    this.conf = conf;
+  public Merge(Configuration conf) {
     this.mergeInfo = null;
+    setConf(conf);
   }

   public int run(String[] args) throws Exception {
     if (parseArgs(args) != 0) {
       return -1;
     }

     // Verify file system is up.
-    FileSystem fs = FileSystem.get(this.conf); // get DFS handle
+    FileSystem fs = FileSystem.get(getConf()); // get DFS handle
     LOG.info("Verifying that file system is available...");
     try {
       FSUtils.checkFileSystemAvailable(fs);
@@ -90,7 +89,7 @@ public class Merge extends Configured implements Tool {
     // Verify HBase is down
     LOG.info("Verifying that HBase is not running...");
     try {
-      HBaseAdmin.checkHBaseAvailable(conf);
+      HBaseAdmin.checkHBaseAvailable(getConf());
       LOG.fatal("HBase cluster must be off-line.");
       return -1;
     } catch (MasterNotRunningException e) {
@@ -99,8 +98,8 @@ public class Merge extends Configured implements Tool {

     // Initialize MetaUtils and and get the root of the HBase installation

-    this.utils = new MetaUtils(conf);
-    this.rootdir = FSUtils.getRootDir(this.conf);
+    this.utils = new MetaUtils(getConf());
+    this.rootdir = FSUtils.getRootDir(getConf());
     try {
       if (isMetaTable) {
         mergeTwoMetaRegions();
@@ -278,9 +277,9 @@ public class Merge extends Configured implements Tool {
     }
     HRegion merged = null;
     HLog log = utils.getLog();
-    HRegion r1 = HRegion.openHRegion(info1, this.rootdir, log, this.conf);
+    HRegion r1 = HRegion.openHRegion(info1, this.rootdir, log, getConf());
     try {
-      HRegion r2 = HRegion.openHRegion(info2, this.rootdir, log, this.conf);
+      HRegion r2 = HRegion.openHRegion(info2, this.rootdir, log, getConf());
       try {
         merged = HRegion.merge(r1, r2);
       } finally {
@@ -335,7 +334,7 @@ public class Merge extends Configured implements Tool {
    */
   private int parseArgs(String[] args) throws IOException {
     GenericOptionsParser parser =
-      new GenericOptionsParser(this.getConf(), args);
+      new GenericOptionsParser(getConf(), args);

     String[] remainingArgs = parser.getRemainingArgs();
     if (remainingArgs.length != 3) {
@@ -379,7 +378,7 @@ public class Merge extends Configured implements Tool {
   public static void main(String[] args) {
     int status = 0;
     try {
-      status = ToolRunner.run(new Merge(), args);
+      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
     } catch (Exception e) {
       LOG.error("exiting due to error", e);
       status = -1;

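Merge now leans entirely on the Configured base class: the private conf field is gone, setConf()/getConf() carry the state, and main() threads the Configuration through ToolRunner instead of building it inside the tool. A sketch of the same Tool wiring with a hypothetical tool class (not from this commit):

    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class MyToolSketch extends Configured implements Tool {
      public int run(String[] args) throws Exception {
        // getConf() returns whatever ToolRunner injected via setConf().
        System.out.println("rootdir = " + getConf().get("hbase.rootdir"));
        return 0;
      }

      public static void main(String[] args) throws Exception {
        // Mirrors the patched Merge.main: the Configuration is created once
        // and passed to ToolRunner rather than constructed inside the Tool.
        System.exit(ToolRunner.run(HBaseConfiguration.create(), new MyToolSketch(), args));
      }
    }
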
@@ -29,6 +29,7 @@ import java.util.TreeMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -42,10 +43,10 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;

 /**
  * Contains utility methods for manipulating HBase meta tables.
@@ -56,7 +57,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
  */
 public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);
-  private final HBaseConfiguration conf;
+  private final Configuration conf;
   private FileSystem fs;
   private Path rootdir;
   private HLog log;
@@ -67,12 +68,12 @@ public class MetaUtils {
   /** Default constructor
    * @throws IOException */
   public MetaUtils() throws IOException {
-    this(new HBaseConfiguration());
+    this(HBaseConfiguration.create());
   }

-  /** @param conf HBaseConfiguration
+  /** @param conf Configuration
    * @throws IOException */
-  public MetaUtils(HBaseConfiguration conf) throws IOException {
+  public MetaUtils(Configuration conf) throws IOException {
     this.conf = conf;
     conf.setInt("hbase.client.retries.number", 1);
     this.rootRegion = null;
@@ -284,7 +285,7 @@ public class MetaUtils {
   * @param onlineOffline Pass <code>true</code> to OFFLINE the region.
   * @throws IOException
   */
-  public static void changeOnlineStatus (final HBaseConfiguration c,
+  public static void changeOnlineStatus (final Configuration c,
    final byte [] row, final boolean onlineOffline)
  throws IOException {
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);

@@ -23,17 +23,18 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintWriter;
-import java.net.UnknownHostException;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
-import java.util.Properties;
-import java.util.List;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Enumeration;
+import java.util.List;
+import java.util.Properties;
 import java.util.Map.Entry;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.net.DNS;
@@ -66,7 +67,7 @@ public class HQuorumPeer implements HConstants {
   * @param args String[] of command line arguments. Not used.
   */
  public static void main(String[] args) {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
    try {
      Properties zkProperties = makeZKProps(conf);
      writeMyID(zkProperties);
@@ -98,7 +99,7 @@ public class HQuorumPeer implements HConstants {
  private static void writeMyID(Properties properties) throws IOException {
    long myId = -1;

-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
    String myAddress = DNS.getDefaultHost(
        conf.get("hbase.zookeeper.dns.interface","default"),
        conf.get("hbase.zookeeper.dns.nameserver","default"));
@@ -161,10 +162,10 @@ public class HQuorumPeer implements HConstants {
   * If there is a zoo.cfg in the classpath, simply read it in. Otherwise parse
   * the corresponding config options from the HBase XML configs and generate
   * the appropriate ZooKeeper properties.
-   * @param conf HBaseConfiguration to read from.
+   * @param conf Configuration to read from.
   * @return Properties holding mappings representing ZooKeeper zoo.cfg file.
   */
-  public static Properties makeZKProps(HBaseConfiguration conf) {
+  public static Properties makeZKProps(Configuration conf) {
    // First check if there is a zoo.cfg in the CLASSPATH. If so, simply read
    // it and grab its configuration properties.
    ClassLoader cl = HQuorumPeer.class.getClassLoader();
@@ -218,7 +219,7 @@ public class HQuorumPeer implements HConstants {
   * @return Properties parsed from config stream with variables substituted.
   * @throws IOException if anything goes wrong parsing config
   */
-  public static Properties parseZooCfg(HBaseConfiguration conf,
+  public static Properties parseZooCfg(Configuration conf,
      InputStream inputStream) throws IOException {
    Properties properties = new Properties();
    try {

@@ -3,6 +3,7 @@ package org.apache.hadoop.hbase.zookeeper;
 import java.util.Properties;
 import java.util.Map.Entry;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;

@@ -16,7 +17,7 @@ public class ZKServerTool implements HConstants {
   * @param args Command line arguments. First arg is path to zookeepers file.
   */
  public static void main(String args[]) {
-    HBaseConfiguration conf = new HBaseConfiguration();
+    Configuration conf = HBaseConfiguration.create();
    // Note that we do not simply grab the property ZOOKEEPER_QUORUM from
    // the HBaseConfiguration because the user may be using a zoo.cfg file.
    Properties zkProps = HQuorumPeer.makeZKProps(conf);

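Callers of the loosened makeZKProps signature now need only a Configuration. A sketch (illustrative; relies on the public static method shown in the HQuorumPeer hunks above):

    import java.util.Properties;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.HQuorumPeer;

    public class ZKPropsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Reads zoo.cfg from the classpath if present, otherwise derives the
        // ZooKeeper properties from the HBase configuration.
        Properties zkProps = HQuorumPeer.makeZKProps(conf);
        zkProps.list(System.out);
      }
    }
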
@@ -33,7 +33,7 @@ import java.util.Map.Entry;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
@@ -42,8 +42,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.ZooKeeper.States;
 import org.apache.zookeeper.data.Stat;

@@ -72,11 +72,11 @@ public class ZooKeeperWrapper implements HConstants {

   /**
    * Create a ZooKeeperWrapper.
-   * @param conf HBaseConfiguration to read settings from.
+   * @param conf Configuration to read settings from.
    * @param watcher ZooKeeper watcher to register.
    * @throws IOException If a connection error occurs.
    */
-  public ZooKeeperWrapper(HBaseConfiguration conf, Watcher watcher)
+  public ZooKeeperWrapper(Configuration conf, Watcher watcher)
   throws IOException {
     Properties properties = HQuorumPeer.makeZKProps(conf);
     setQuorumServers(properties);

@@ -112,7 +112,8 @@ public class TestMergeTool extends HBaseTestCase {
     // Start up dfs
     this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
     this.fs = this.dfsCluster.getFileSystem();
-    conf.set("fs.defaultFS", fs.getUri().toString());
+    System.out.println("fs=" + this.fs);
+    this.conf.set("fs.defaultFS", fs.getUri().toString());
     Path parentdir = fs.getHomeDirectory();
     conf.set(HConstants.HBASE_DIR, parentdir.toString());
     fs.mkdirs(parentdir);
@@ -171,7 +172,8 @@ public class TestMergeTool extends HBaseTestCase {
   throws Exception {
     Merge merger = new Merge(this.conf);
     LOG.info(msg);
-    int errCode = ToolRunner.run(merger,
+    System.out.println("fs2=" + this.conf.get("fs.defaultFS"));
+    int errCode = ToolRunner.run(this.conf, merger,
       new String[] {this.desc.getNameAsString(), regionName1, regionName2}
     );
     assertTrue("'" + msg + "' failed", errCode == 0);
@@ -271,4 +273,4 @@ public class TestMergeTool extends HBaseTestCase {
       log.closeAndDelete();
     }
   }
-}
+}

@@ -1,6 +1,7 @@
 <%@ page contentType="text/html;charset=UTF-8"
   import="java.util.*"
   import="java.net.URLEncoder"
+  import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.io.Text"
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.master.HMaster"
@@ -14,7 +15,7 @@
   import="org.apache.hadoop.hbase.HColumnDescriptor"
   import="org.apache.hadoop.hbase.HTableDescriptor" %><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  HBaseConfiguration conf = master.getConfiguration();
+  Configuration conf = master.getConfiguration();
   HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
   Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
   Map<String, HServerInfo> serverToServerInfos =

@@ -4,6 +4,7 @@
   import="java.net.URLEncoder"
   import="org.apache.hadoop.io.Text"
   import="org.apache.hadoop.io.Writable"
+  import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.HTableDescriptor"
   import="org.apache.hadoop.hbase.client.HTable"
   import="org.apache.hadoop.hbase.client.HBaseAdmin"
@@ -19,7 +20,7 @@
   import="java.util.Map"
   import="org.apache.hadoop.hbase.HConstants"%><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  HBaseConfiguration conf = master.getConfiguration();
+  Configuration conf = master.getConfiguration();
   HBaseAdmin hbadmin = new HBaseAdmin(conf);
   String tableName = request.getParameter("name");
   HTable table = new HTable(conf, tableName);

@@ -1,5 +1,6 @@
 <%@ page contentType="text/html;charset=UTF-8"
   import="java.io.IOException"
+  import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.client.HBaseAdmin"
   import="org.apache.hadoop.hbase.client.HConnection"
   import="org.apache.hadoop.hbase.HRegionInfo"
@@ -8,7 +9,7 @@
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.HConstants"%><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  HBaseConfiguration conf = master.getConfiguration();
+  Configuration conf = master.getConfiguration();
   HBaseAdmin hbadmin = new HBaseAdmin(conf);
   HConnection connection = hbadmin.getConnection();
   ZooKeeperWrapper wrapper = connection.getZooKeeperWrapper();