HADOOP-1282. Omnibus HBase patch. Improved tests and configuration. Contributed by Jim Kellerman.
git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@532083 13f79535-47bb-0310-9956-ffa450edef68
parent 114d67c614
commit ec9a568764
hbase-default.xml (new file)
@@ -0,0 +1,48 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hbase.master</name>
    <value>localhost:60000</value>
    <description>The host and port that the HBase master runs at.
    TODO: Support 'local' (All running in single context).
    </description>
  </property>
  <property>
    <name>hbase.regionserver</name>
    <value>localhost:60010</value>
    <description>The host and port a HBase region server runs at.
    </description>
  </property>
  <property>
    <name>hbase.regiondir</name>
    <value>${hadoop.tmp.dir}/hbase</value>
    <description>The directory shared by region servers.
    </description>
  </property>
  <property>
    <name>hbase.client.timeout.length</name>
    <value>10000</value>
    <description>Client timeout in milliseconds</description>
  </property>
  <property>
    <name>hbase.client.timeout.number</name>
    <value>5</value>
    <description>Try this many timeouts before giving up.
    </description>
  </property>
  <property>
    <name>hbase.client.retries.number</name>
    <value>2</value>
    <description>Count of maximum retries fetching the root region from root
    region server.
    </description>
  </property>
  <property>
    <name>hbase.master.meta.thread.rescanfrequency</name>
    <value>60000</value>
    <description>How long the HMaster sleeps (in milliseconds) between scans of
    the META table.
    </description>
  </property>
</configuration>
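These defaults flow through Hadoop's Configuration machinery. A minimal sketch of how a client picks them up (the class name here is hypothetical; the keys, fallback values, and getter calls are the ones HClient uses further down in this patch, and addDefaultResource() is the call the new HBaseConfiguration class makes):

import org.apache.hadoop.conf.Configuration;

public class ShowHBaseDefaults {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // hbase-default.xml must be on the classpath for this to resolve.
    conf.addDefaultResource("hbase-default.xml");

    // Same keys and fallback defaults that HClient's constructor uses.
    String master = conf.get("hbase.master", "localhost:60000");
    long timeout = conf.getLong("hbase.client.timeout.length", 10 * 1000);
    int numTimeouts = conf.getInt("hbase.client.timeout.number", 5);
    int numRetries = conf.getInt("hbase.client.retries.number", 2);

    System.out.println(master + " " + timeout + " " + numTimeouts + " " + numRetries);
  }
}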
HAbstractScanner.java
@@ -60,17 +60,17 @@ public abstract class HAbstractScanner implements HScannerInterface {
       String column = col.toString();
       try {
         int colpos = column.indexOf(":") + 1;
-        if (colpos == 0) {
+        if(colpos == 0) {
           throw new IllegalArgumentException("Column name has no family indicator.");
         }

         String columnkey = column.substring(colpos);

-        if (columnkey == null || columnkey.length() == 0) {
+        if(columnkey == null || columnkey.length() == 0) {
           this.matchType = MATCH_TYPE.FAMILY_ONLY;
           this.family = column.substring(0, colpos);

-        } else if (isRegexPattern.matcher(columnkey).matches()) {
+        } else if(isRegexPattern.matcher(columnkey).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
           this.columnMatcher = Pattern.compile(column);
@@ -86,13 +86,13 @@ public abstract class HAbstractScanner implements HScannerInterface {
     // Matching method

     boolean matches(Text col) throws IOException {
-      if (this.matchType == MATCH_TYPE.SIMPLE) {
+      if(this.matchType == MATCH_TYPE.SIMPLE) {
         return col.equals(this.col);

-      } else if (this.matchType == MATCH_TYPE.FAMILY_ONLY) {
+      } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
         return col.toString().startsWith(this.family);

-      } else if (this.matchType == MATCH_TYPE.REGEX) {
+      } else if(this.matchType == MATCH_TYPE.REGEX) {
         return this.columnMatcher.matcher(col.toString()).matches();

       } else {
@@ -121,7 +121,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
     for(int i = 0; i < targetCols.length; i++) {
       Text family = HStoreKey.extractFamily(targetCols[i]);
       Vector<ColumnMatcher> matchers = okCols.get(family);
-      if (matchers == null) {
+      if(matchers == null) {
         matchers = new Vector<ColumnMatcher>();
       }
       matchers.add(new ColumnMatcher(targetCols[i]));
@@ -144,11 +144,11 @@ public abstract class HAbstractScanner implements HScannerInterface {
       Text column = keys[i].getColumn();
       Text family = HStoreKey.extractFamily(column);
       Vector<ColumnMatcher> matchers = okCols.get(family);
-      if (matchers == null) {
+      if(matchers == null) {
         return false;
       }
       for(int m = 0; m < matchers.size(); m++) {
-        if (matchers.get(m).matches(column)) {
+        if(matchers.get(m).matches(column)) {
          return true;
        }
      }
@@ -180,7 +180,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
    * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
    */
   public boolean next(HStoreKey key, TreeMap<Text, byte[]> results)
       throws IOException {

     // Find the next row label (and timestamp)

@@ -188,12 +188,12 @@ public abstract class HAbstractScanner implements HScannerInterface {
     long chosenTimestamp = -1;
     for(int i = 0; i < keys.length; i++) {
       while((keys[i] != null)
           && (columnMatch(i))
           && (keys[i].getTimestamp() <= this.timestamp)
           && ((chosenRow == null)
               || (keys[i].getRow().compareTo(chosenRow) < 0)
               || ((keys[i].getRow().compareTo(chosenRow) == 0)
                   && (keys[i].getTimestamp() > chosenTimestamp)))) {

         chosenRow = new Text(keys[i].getRow());
         chosenTimestamp = keys[i].getTimestamp();
@@ -203,7 +203,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
     // Grab all the values that match this row/timestamp

     boolean insertedItem = false;
-    if (chosenRow != null) {
+    if(chosenRow != null) {
       key.setRow(chosenRow);
       key.setVersion(chosenTimestamp);
       key.setColumn(new Text(""));
@@ -212,10 +212,10 @@ public abstract class HAbstractScanner implements HScannerInterface {
         // Fetch the data

         while((keys[i] != null)
             && (keys[i].getRow().compareTo(chosenRow) == 0)
             && (keys[i].getTimestamp() == chosenTimestamp)) {

-          if (columnMatch(i)) {
+          if(columnMatch(i)) {
             outbuf.reset();
             vals[i].write(outbuf);
             byte byteresults[] = outbuf.getData();
@@ -226,7 +226,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
             insertedItem = true;
           }

-          if (!getNext(i)) {
+          if (! getNext(i)) {
             closeSubScanner(i);
           }
         }
@@ -235,9 +235,9 @@ public abstract class HAbstractScanner implements HScannerInterface {
         // a valid timestamp, so we're ready next time.

         while((keys[i] != null)
             && ((keys[i].getRow().compareTo(chosenRow) <= 0)
                 || (keys[i].getTimestamp() > this.timestamp)
-                || (!columnMatch(i)))) {
+                || (! columnMatch(i)))) {

           getNext(i);
         }
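The ColumnMatcher constructor above decides the match mode from the requested column name alone: an empty qualifier after the ':' means family-only, a qualifier that parses as a regex means pattern matching, anything else is an exact match. A standalone sketch of that decision (the column names are invented, and isRegex() below is a stand-in for the class's isRegexPattern test, whose exact pattern is not shown in this hunk):

import java.util.regex.Pattern;

public class ColumnNameDemo {
  public static void main(String[] args) {
    // Mirrors the parse in HAbstractScanner's ColumnMatcher constructor.
    for (String column : new String[] {"info:", "info:server", "info:server.*"}) {
      int colpos = column.indexOf(":") + 1;
      if (colpos == 0) {
        throw new IllegalArgumentException("Column name has no family indicator.");
      }
      String columnkey = column.substring(colpos);
      if (columnkey.length() == 0) {
        System.out.println(column + " -> FAMILY_ONLY (prefix match on family)");
      } else if (isRegex(columnkey)) {
        System.out.println(column + " -> REGEX via Pattern.compile(\"" + column + "\")");
      } else {
        System.out.println(column + " -> SIMPLE (exact Text equality)");
      }
    }
  }

  // Assumption: treat any qualifier containing regex metacharacters as a pattern.
  private static boolean isRegex(String key) {
    return !key.matches("[a-zA-Z0-9_]+");
  }
}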
HBaseConfiguration.java (new file)
@@ -0,0 +1,25 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import org.apache.hadoop.conf.Configuration;

public class HBaseConfiguration extends Configuration {
  public HBaseConfiguration() {
    super();
    addDefaultResource("hbase-default.xml");
  }
}
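Subclassing Configuration this way gives every HBase entry point the same defaults from one constructor call; HClient.main() below does exactly this. A minimal sketch of the intended use (the wrapper class is hypothetical; the constructor calls and conf.set() are standard Configuration API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HClient;

public class ClientBootstrap {  // hypothetical helper, not part of this patch
  public static HClient newClient() {
    Configuration conf = new HBaseConfiguration();  // loads hbase-default.xml
    conf.set("hbase.master", "localhost:60000");    // optional override
    return new HClient(conf);
  }
}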
HClient.java
@@ -15,29 +15,39 @@
  */
 package org.apache.hadoop.hbase;

-import org.apache.hadoop.io.*;
-import org.apache.hadoop.ipc.*;
-import org.apache.hadoop.conf.*;
-
-import java.io.*;
-import java.util.*;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.log4j.Logger;

 /*******************************************************************************
  * HClient manages a connection to a single HRegionServer.
  ******************************************************************************/
-public class HClient extends HGlobals implements HConstants {
+public class HClient implements HConstants {
+  private final Logger LOG =
+    Logger.getLogger(this.getClass().getName());

   private static final Text[] metaColumns = {
     META_COLUMN_FAMILY
   };
   private static final Text startRow = new Text();

   private boolean closed;
-  private Configuration conf;
-  private HServerAddress masterLocation;
   private long clientTimeout;
   private int numTimeouts;
   private int numRetries;
   private HMasterInterface master;
+  private final Configuration conf;

   private class TableInfo {
     public HRegionInfo regionInfo;
@@ -72,16 +82,11 @@ public class HClient extends HGlobals implements HConstants {
   public HClient(Configuration conf) {
     this.closed = false;
     this.conf = conf;

-    // Load config settings
-
-    this.masterLocation = new HServerAddress(this.conf.get(MASTER_DEFAULT_NAME));
-    this.clientTimeout = this.conf.getLong("hbase.client.timeout.length", 10 * 1000);
-    this.numTimeouts = this.conf.getInt("hbase.client.timeout.number", 5);
-    this.numRetries = this.conf.getInt("hbase.client.retries.number", 2);
-
-    // Finish initialization
-
+    this.clientTimeout = conf.getLong("hbase.client.timeout.length", 10 * 1000);
+    this.numTimeouts = conf.getInt("hbase.client.timeout.number", 5);
+    this.numRetries = conf.getInt("hbase.client.retries.number", 2);

     this.master = null;
     this.tablesToServers = new TreeMap<Text, TreeMap<Text, TableInfo>>();
     this.tableServers = null;
@@ -94,13 +99,33 @@ public class HClient extends HGlobals implements HConstants {
     this.rand = new Random();
   }

+  public synchronized void createTable(HTableDescriptor desc) throws IOException {
+    if(closed) {
+      throw new IllegalStateException("client is not open");
+    }
+    if(master == null) {
+      locateRootRegion();
+    }
+    master.createTable(desc);
+  }
+
+  public synchronized void deleteTable(Text tableName) throws IOException {
+    if(closed) {
+      throw new IllegalStateException("client is not open");
+    }
+    if(master == null) {
+      locateRootRegion();
+    }
+    master.deleteTable(tableName);
+  }
+
   public synchronized void openTable(Text tableName) throws IOException {
-    if (closed) {
+    if(closed) {
       throw new IllegalStateException("client is not open");
     }

     tableServers = tablesToServers.get(tableName);
-    if (tableServers == null) {      // We don't know where the table is
+    if(tableServers == null ) {      // We don't know where the table is
       findTableInMeta(tableName);    // Load the information from meta
     }
   }
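A hedged sketch of driving the new admin calls end to end. The HTableDescriptor construction below is an assumption; this patch shows only that createTable() takes a fully built descriptor:

// Sketch only, not part of the patch.
public static void roundTrip() throws IOException {
  HClient client = new HClient(new HBaseConfiguration());

  HTableDescriptor desc = new HTableDescriptor("testtable", 1); // assumed ctor
  client.createTable(desc);                 // RPC to the master
  client.openTable(new Text("testtable"));  // caches region locations
  // ... gets and scans against the open table ...
  client.deleteTable(new Text("testtable"));
  client.close();
}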
@@ -108,9 +133,9 @@ public class HClient extends HGlobals implements HConstants {
   private void findTableInMeta(Text tableName) throws IOException {
     TreeMap<Text, TableInfo> metaServers = tablesToServers.get(META_TABLE_NAME);

-    if (metaServers == null) {        // Don't know where the meta is
+    if(metaServers == null) {         // Don't know where the meta is
       loadMetaFromRoot(tableName);
-      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
+      if(tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
         // All we really wanted was the meta or root table
         return;
       }
@@ -119,7 +144,7 @@ public class HClient extends HGlobals implements HConstants {

     tableServers = new TreeMap<Text, TableInfo>();
     for(Iterator<TableInfo> i = metaServers.tailMap(tableName).values().iterator();
-        i.hasNext();) {
+        i.hasNext(); ) {

       TableInfo t = i.next();

@@ -133,7 +158,7 @@ public class HClient extends HGlobals implements HConstants {
    */
   private void loadMetaFromRoot(Text tableName) throws IOException {
     locateRootRegion();
-    if (tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
+    if(tableName.equals(ROOT_TABLE_NAME)) {    // All we really wanted was the root
       return;
     }
     scanRoot();
@@ -144,10 +169,12 @@ public class HClient extends HGlobals implements HConstants {
    * could be.
    */
   private void locateRootRegion() throws IOException {
-    if (master == null) {
+    if(master == null) {
+      HServerAddress masterLocation =
+        new HServerAddress(this.conf.get(MASTER_ADDRESS));
       master = (HMasterInterface)RPC.getProxy(HMasterInterface.class,
           HMasterInterface.versionID,
           masterLocation.getInetSocketAddress(), conf);
     }

     int tries = 0;
@@ -157,16 +184,15 @@ public class HClient extends HGlobals implements HConstants {
       while(rootRegionLocation == null && localTimeouts < numTimeouts) {
         rootRegionLocation = master.findRootRegion();

-        if (rootRegionLocation == null) {
+        if(rootRegionLocation == null) {
           try {
             Thread.sleep(clientTimeout);

           } catch(InterruptedException iex) {
           }
           localTimeouts++;
         }
       }
-      if (rootRegionLocation == null) {
+      if(rootRegionLocation == null) {
         throw new IOException("Timed out trying to locate root region");
       }

@@ -174,9 +200,9 @@ public class HClient extends HGlobals implements HConstants {

       HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);

-      if (rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
+      if(rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName) != null) {
         tableServers = new TreeMap<Text, TableInfo>();
-        tableServers.put(startRow, new TableInfo(rootRegionInfo, rootRegionLocation));
+        tableServers.put(startRow, new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
         tablesToServers.put(ROOT_TABLE_NAME, tableServers);
         break;
       }
@@ -184,7 +210,7 @@ public class HClient extends HGlobals implements HConstants {

     } while(rootRegionLocation == null && tries++ < numRetries);

-    if (rootRegionLocation == null) {
+    if(rootRegionLocation == null) {
       closed = true;
       throw new IOException("unable to locate root region server");
     }
@@ -202,53 +228,64 @@ public class HClient extends HGlobals implements HConstants {

   /*
    * Scans a single meta region
-   * @param t - the table we're going to scan
-   * @param tableName - the name of the table we're looking for
+   * @param t the table we're going to scan
+   * @param tableName the name of the table we're looking for
    */
   private void scanOneMetaRegion(TableInfo t, Text tableName) throws IOException {
     HRegionInterface server = getHRegionConnection(t.serverAddress);
-    HScannerInterface scanner = null;
+    long scannerId = -1L;
     try {
-      scanner = server.openScanner(t.regionInfo.regionName, metaColumns, tableName);
-      HStoreKey key = new HStoreKey();
-      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-      DataInputBuffer inbuf = new DataInputBuffer();
-
-      while(scanner.next(key, results)) {
-        byte hRegionInfoBytes[] = results.get(META_COL_REGIONINFO);
-        inbuf.reset(hRegionInfoBytes, hRegionInfoBytes.length);
+      scannerId = server.openScanner(t.regionInfo.regionName, metaColumns, tableName);
+
+      DataInputBuffer inbuf = new DataInputBuffer();
+      while(true) {
+        HStoreKey key = new HStoreKey();
+
+        LabelledData[] values = server.next(scannerId, key);
+        if(values.length == 0) {
+          break;
+        }
+
+        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+        for(int i = 0; i < values.length; i++) {
+          results.put(values[i].getLabel(), values[i].getData().get());
+        }
         HRegionInfo regionInfo = new HRegionInfo();
+        byte[] bytes = results.get(META_COL_REGIONINFO);
+        inbuf.reset(bytes, bytes.length);
         regionInfo.readFields(inbuf);

-        if (!regionInfo.tableDesc.getName().equals(tableName)) {
+        if(!regionInfo.tableDesc.getName().equals(tableName)) {
           // We're done
           break;
         }

-        byte serverBytes[] = results.get(META_COL_SERVER);
-        String serverName = new String(serverBytes, UTF8_ENCODING);
+        bytes = results.get(META_COL_SERVER);
+        String serverName = new String(bytes, UTF8_ENCODING);

         tableServers.put(regionInfo.startKey,
             new TableInfo(regionInfo, new HServerAddress(serverName)));
-
-        results.clear();
       }

     } finally {
-      scanner.close();
+      if(scannerId != -1L) {
+        server.close(scannerId);
+      }
     }
   }

-  public synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
+  synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
       throws IOException {

     // See if we already have a connection

     HRegionInterface server = servers.get(regionServer.toString());

-    if (server == null) {               // Get a connection
+    if(server == null) {                // Get a connection

       server = (HRegionInterface)RPC.waitForProxy(HRegionInterface.class,
           HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);

       servers.put(regionServer.toString(), server);
     }
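The rewrite above (repeated in listTables() and ClientScanner below) replaces the remote HScannerInterface handle with a plain scanner id plus explicit next()/close() RPCs. The client-side loop it establishes, extracted as a sketch (openScanner, next, and close are the method names from this patch; process() is a hypothetical stand-in for whatever the caller does with each cell):

// 'server' is an HRegionInterface proxy; regionName, columns and startRow
// identify what to scan.
long scannerId = -1L;
try {
  scannerId = server.openScanner(regionName, columns, startRow);
  while (true) {
    HStoreKey key = new HStoreKey();
    LabelledData[] values = server.next(scannerId, key);
    if (values.length == 0) {
      break;                       // an empty batch signals end-of-scan
    }
    for (int i = 0; i < values.length; i++) {
      // values[i].getLabel() is the column; getData().get() the raw bytes.
      process(key, values[i].getLabel(), values[i].getData().get());
    }
  }
} finally {
  if (scannerId != -1L) {
    server.close(scannerId);       // always release the server-side scanner
  }
}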
@@ -257,7 +294,7 @@ public class HClient extends HGlobals implements HConstants {

   /** Close the connection to the HRegionServer */
   public synchronized void close() throws IOException {
-    if (!closed) {
+    if(! closed) {
       RPC.stopClient();
       closed = true;
     }
@@ -270,48 +307,58 @@ public class HClient extends HGlobals implements HConstants {
    * catalog table that just contains table names and their descriptors.
    * Right now, it only exists as part of the META table's region info.
    */
-  public HTableDescriptor[] listTables() throws IOException {
+  public synchronized HTableDescriptor[] listTables() throws IOException {
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();

     TreeMap<Text, TableInfo> metaTables = tablesToServers.get(META_TABLE_NAME);
-    if (metaTables == null) {
+    if(metaTables == null) {
       // Meta is not loaded yet so go do that
       loadMetaFromRoot(META_TABLE_NAME);
       metaTables = tablesToServers.get(META_TABLE_NAME);
     }

-    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext();) {
-      TableInfo t = i.next();
+    for(Iterator<TableInfo>it = metaTables.values().iterator(); it.hasNext(); ) {
+      TableInfo t = it.next();
       HRegionInterface server = getHRegionConnection(t.serverAddress);
-      HScannerInterface scanner = null;
+      long scannerId = -1L;
       try {
-        scanner = server.openScanner(t.regionInfo.regionName, metaColumns, startRow);
+        scannerId = server.openScanner(t.regionInfo.regionName, metaColumns, startRow);
         HStoreKey key = new HStoreKey();
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
         DataInputBuffer inbuf = new DataInputBuffer();
-        while(scanner.next(key, results)) {
-          byte infoBytes[] = (byte[]) results.get(ROOT_COL_REGIONINFO);
-          inbuf.reset(infoBytes, infoBytes.length);
-          HRegionInfo info = new HRegionInfo();
-          info.readFields(inbuf);
-
-          // Only examine the rows where the startKey is zero length
-          if (info.startKey.getLength() == 0) {
-            uniqueTables.add(info.tableDesc);
+        while(true) {
+          LabelledData[] values = server.next(scannerId, key);
+          if(values.length == 0) {
+            break;
+          }
+
+          for(int i = 0; i < values.length; i++) {
+            if(values[i].getLabel().equals(META_COL_REGIONINFO)) {
+              byte[] bytes = values[i].getData().get();
+              inbuf.reset(bytes, bytes.length);
+              HRegionInfo info = new HRegionInfo();
+              info.readFields(inbuf);
+
+              // Only examine the rows where the startKey is zero length
+              if(info.startKey.getLength() == 0) {
+                uniqueTables.add(info.tableDesc);
+              }
+            }
           }
-          results.clear();
         }

       } finally {
-        scanner.close();
+        if(scannerId != -1L) {
+          server.close(scannerId);
+        }
       }
     }
     return (HTableDescriptor[]) uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
   }

-  private TableInfo getTableInfo(Text row) {
-    if (tableServers == null) {
+  private synchronized TableInfo getTableInfo(Text row) {
+    if(tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
@@ -325,17 +372,17 @@ public class HClient extends HGlobals implements HConstants {
   public byte[] get(Text row, Text column) throws IOException {
     TableInfo info = getTableInfo(row);
     return getHRegionConnection(info.serverAddress).get(
         info.regionInfo.regionName, row, column).get();
   }

   /** Get the specified number of versions of the specified row and column */
   public byte[][] get(Text row, Text column, int numVersions) throws IOException {
     TableInfo info = getTableInfo(row);
     BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
         info.regionInfo.regionName, row, column, numVersions);

     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0; i < values.length; i++) {
+    for(int i = 0 ; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -348,10 +395,10 @@ public class HClient extends HGlobals implements HConstants {
   public byte[][] get(Text row, Text column, long timestamp, int numVersions) throws IOException {
     TableInfo info = getTableInfo(row);
     BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
         info.regionInfo.regionName, row, column, timestamp, numVersions);

     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0; i < values.length; i++) {
+    for(int i = 0 ; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -361,15 +408,15 @@ public class HClient extends HGlobals implements HConstants {
   public LabelledData[] getRow(Text row) throws IOException {
     TableInfo info = getTableInfo(row);
     return getHRegionConnection(info.serverAddress).getRow(
         info.regionInfo.regionName, row);
   }

   /**
    * Get a scanner on the current table starting at the specified row.
    * Return the specified columns.
    */
-  public HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
-    if (tableServers == null) {
+  public synchronized HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
+    if(tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     return new ClientScanner(columns, startRow);
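From the caller's side the scanner API is unchanged by this patch; something like the following sketch still works against the new implementation (the table name is invented, and an empty Text as startRow matches the startRow constant HClient itself uses):

// Sketch only: scan a whole column family from the first row of a table.
public static void scanAll() throws IOException {
  HClient client = new HClient(new HBaseConfiguration());
  client.openTable(new Text("testtable"));          // table name invented

  Text[] columns = { new Text("info:") };           // family-only match
  HScannerInterface scanner = client.obtainScanner(columns, new Text());
  try {
    HStoreKey key = new HStoreKey();
    TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
    while (scanner.next(key, results)) {
      System.out.println(key.getRow() + ": " + results.keySet());
      results.clear();  // next() accumulates into the caller's map
    }
  } finally {
    scanner.close();
  }
}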
@@ -462,7 +509,7 @@ public class HClient extends HGlobals implements HConstants {
     private TableInfo[] regions;
     private int currentRegion;
     private HRegionInterface server;
-    private HScannerInterface scanner;
+    private long scannerId;

     public ClientScanner(Text[] columns, Text startRow) throws IOException {
       this.columns = columns;
@@ -472,7 +519,7 @@ public class HClient extends HGlobals implements HConstants {
       this.regions = info.toArray(new TableInfo[info.size()]);
       this.currentRegion = -1;
       this.server = null;
-      this.scanner = null;
+      this.scannerId = -1L;
       nextScanner();
     }

@@ -481,18 +528,19 @@ public class HClient extends HGlobals implements HConstants {
      * Returns false if there are no more scanners.
      */
     private boolean nextScanner() throws IOException {
-      if (scanner != null) {
-        scanner.close();
+      if(scannerId != -1L) {
+        server.close(scannerId);
+        scannerId = -1L;
       }
       currentRegion += 1;
-      if (currentRegion == regions.length) {
+      if(currentRegion == regions.length) {
         close();
         return false;
       }
       try {
         server = getHRegionConnection(regions[currentRegion].serverAddress);
-        scanner = server.openScanner(regions[currentRegion].regionInfo.regionName,
+        scannerId = server.openScanner(regions[currentRegion].regionInfo.regionName,
             columns, startRow);

       } catch(IOException e) {
         close();
@@ -505,29 +553,66 @@ public class HClient extends HGlobals implements HConstants {
      * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
      */
     public boolean next(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
-      if (closed) {
+      if(closed) {
         return false;
       }
-      boolean status = scanner.next(key, results);
-      if (!status) {
-        status = nextScanner();
-        if (status) {
-          status = scanner.next(key, results);
-        }
+      LabelledData[] values = null;
+      do {
+        values = server.next(scannerId, key);
+      } while(values.length == 0 && nextScanner());
+
+      for(int i = 0; i < values.length; i++) {
+        results.put(values[i].getLabel(), values[i].getData().get());
       }
-      return status;
+      return values.length != 0;
     }

     /* (non-Javadoc)
      * @see org.apache.hadoop.hbase.HScannerInterface#close()
      */
     public void close() throws IOException {
-      if (scanner != null) {
-        scanner.close();
+      if(scannerId != -1L) {
+        server.close(scannerId);
       }
       server = null;
       closed = true;
     }
   }
-}
+
+  private void printUsage() {
+    System.err.println("Usage: java " + this.getClass().getName() +
+        " [--master=hostname:port]");
+  }
+
+  private int doCommandLine(final String args[]) {
+    // Process command-line args. TODO: Better cmd-line processing
+    // (but hopefully something not as painful as cli options).
+    for (String cmd: args) {
+      if (cmd.equals("-h") || cmd.startsWith("--h")) {
+        printUsage();
+        return 0;
+      }
+
+      final String masterArgKey = "--master=";
+      if (cmd.startsWith(masterArgKey)) {
+        this.conf.set(MASTER_ADDRESS,
+          cmd.substring(masterArgKey.length()));
+      }
+    }
+
+    int errCode = -1;
+    try {
+      locateRootRegion();
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+
+    return errCode;
+  }
+
+  public static void main(final String args[]) {
+    Configuration c = new HBaseConfiguration();
+    int errCode = (new HClient(c)).doCommandLine(args);
+    System.exit(errCode);
+  }
+}
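With main() in place, HClient becomes runnable standalone. A plausible invocation, per printUsage() above (the host:port shown is just the shipped default; the classpath must carry the HBase and Hadoop jars plus hbase-default.xml):

java org.apache.hadoop.hbase.HClient --master=localhost:60000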
HConstants.java
@@ -24,7 +24,18 @@ public interface HConstants {

   // Configuration parameters

-  static final String MASTER_DEFAULT_NAME = "hbase.master.default.name";
+  // TODO: URL for hbase master, like hdfs URLs with host and port.
+  // Or, like jdbc URLs:
+  // jdbc:mysql://[host][,failoverhost...][:port]/[database]
+  // jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
+
+  static final String MASTER_ADDRESS = "hbase.master";
+  // TODO: Support 'local': i.e. default of all running in single
+  // process. Same for regionserver.
+  static final String DEFAULT_MASTER_ADDRESS = "localhost:60000";
+  static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
+  static final String DEFAULT_REGIONSERVER_ADDRESS =
+    "localhost:60010";
   static final String HREGION_DIR = "hbase.regiondir";
   static final String DEFAULT_HREGION_DIR = "/hbase";
   static final String HREGIONDIR_PREFIX = "hregion_";
@@ -37,10 +48,10 @@ public interface HConstants {
   // Do we ever need to know all the information that we are storing?

   static final Text ROOT_TABLE_NAME = new Text("--ROOT--");
-  static final Text ROOT_COLUMN_FAMILY = new Text("info");
-  static final Text ROOT_COL_REGIONINFO = new Text(ROOT_COLUMN_FAMILY + ":" + "regioninfo");
-  static final Text ROOT_COL_SERVER = new Text(ROOT_COLUMN_FAMILY + ":" + "server");
-  static final Text ROOT_COL_STARTCODE = new Text(ROOT_COLUMN_FAMILY + ":" + "serverstartcode");
+  static final Text ROOT_COLUMN_FAMILY = new Text("info:");
+  static final Text ROOT_COL_REGIONINFO = new Text(ROOT_COLUMN_FAMILY + "regioninfo");
+  static final Text ROOT_COL_SERVER = new Text(ROOT_COLUMN_FAMILY + "server");
+  static final Text ROOT_COL_STARTCODE = new Text(ROOT_COLUMN_FAMILY + "serverstartcode");

   static final Text META_TABLE_NAME = new Text("--META--");
   static final Text META_COLUMN_FAMILY = new Text(ROOT_COLUMN_FAMILY);
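The family constant now carries its trailing ':' delimiter, so column names concatenate without inserting one, and the family Text itself matches what the scanner code treats as a family (HAbstractScanner's FAMILY_ONLY mode keeps the column prefix up to and including the ':'). A quick check of what the two conventions produce:

// Before: new Text("info") + ":" + "regioninfo"  -> "info:regioninfo"
// After:  new Text("info:") + "regioninfo"       -> "info:regioninfo"
// Same full column name; only the family representation changed.
Text family = new Text("info:");
Text regionInfoCol = new Text(family + "regioninfo");  // "info:regioninfo"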
HLocking.java (deleted file)
@@ -1,90 +0,0 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

/*******************************************************************************
 * HLocking is a set of lock primitives that are pretty helpful in a few places
 * around the HBase code. For each independent entity that needs locking, create
 * a new HLocking instance.
 ******************************************************************************/
public class HLocking {
  Integer readerLock = new Integer(0);
  Integer writerLock = new Integer(0);
  int numReaders = 0;
  int numWriters = 0;

  public HLocking() {
  }

  /** Caller needs the nonexclusive read-lock */
  public void obtainReadLock() {
    synchronized(readerLock) {
      synchronized(writerLock) {
        while(numWriters > 0) {
          try {
            writerLock.wait();
          } catch (InterruptedException ie) {
          }
        }
        numReaders++;
        readerLock.notifyAll();
      }
    }
  }

  /** Caller is finished with the nonexclusive read-lock */
  public void releaseReadLock() {
    synchronized(readerLock) {
      synchronized(writerLock) {
        numReaders--;
        readerLock.notifyAll();
      }
    }
  }

  /** Caller needs the exclusive write-lock */
  public void obtainWriteLock() {
    synchronized(readerLock) {
      synchronized(writerLock) {
        while(numReaders > 0) {
          try {
            readerLock.wait();
          } catch (InterruptedException ie) {
          }
        }
        while(numWriters > 0) {
          try {
            writerLock.wait();
          } catch (InterruptedException ie) {
          }
        }
        numWriters++;
        writerLock.notifyAll();
      }
    }
  }

  /** Caller is finished with the write lock */
  public void releaseWriteLock() {
    synchronized(readerLock) {
      synchronized(writerLock) {
        numWriters--;
        writerLock.notifyAll();
      }
    }
  }
}
HLog.java
@@ -101,12 +101,12 @@ public class HLog {
       newlog.close();
     }

-    if (fs.exists(srcDir)) {
+    if(fs.exists(srcDir)) {

-      if (!fs.delete(srcDir)) {
+      if(! fs.delete(srcDir)) {
         LOG.error("Cannot delete: " + srcDir);

-        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
+        if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
           throw new IOException("Cannot delete: " + srcDir);
         }
       }
@@ -127,7 +127,7 @@ public class HLog {
     this.conf = conf;
     this.logSeqNum = 0;

-    if (fs.exists(dir)) {
+    if(fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
     fs.mkdirs(dir);
@@ -154,7 +154,7 @@ public class HLog {

     Vector<Path> toDeleteList = new Vector<Path>();
     synchronized(this) {
-      if (closed) {
+      if(closed) {
         throw new IOException("Cannot roll log; log is closed");
       }

@@ -174,10 +174,10 @@ public class HLog {

       // Close the current writer (if any), and grab a new one.

-      if (writer != null) {
+      if(writer != null) {
         writer.close();

-        if (filenum > 0) {
+        if(filenum > 0) {
           outputfiles.put(logSeqNum-1, computeFilename(filenum-1));
         }
       }
@@ -192,10 +192,10 @@ public class HLog {
       // over all the regions.

       long oldestOutstandingSeqNum = Long.MAX_VALUE;
-      for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext();) {
+      for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
         long curSeqNum = it.next().longValue();

-        if (curSeqNum < oldestOutstandingSeqNum) {
+        if(curSeqNum < oldestOutstandingSeqNum) {
           oldestOutstandingSeqNum = curSeqNum;
         }
       }
@@ -205,10 +205,10 @@ public class HLog {

       LOG.debug("removing old log files");

-      for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
+      for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
         long maxSeqNum = it.next().longValue();

-        if (maxSeqNum < oldestOutstandingSeqNum) {
+        if(maxSeqNum < oldestOutstandingSeqNum) {
           Path p = outputfiles.get(maxSeqNum);
           it.remove();
           toDeleteList.add(p);
@@ -221,7 +221,7 @@ public class HLog {

     // Actually delete them, if any!

-    for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext();) {
+    for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
       Path p = it.next();
       fs.delete(p);
     }
@@ -262,7 +262,7 @@ public class HLog {
    * We need to seize a lock on the writer so that writes are atomic.
    */
   public synchronized void append(Text regionName, Text tableName, Text row, TreeMap<Text, byte[]> columns, long timestamp) throws IOException {
-    if (closed) {
+    if(closed) {
       throw new IOException("Cannot append; log is closed");
     }

@@ -273,12 +273,12 @@ public class HLog {
     // that don't have any flush yet, the relevant operation is the
     // first one that's been added.

-    if (regionToLastFlush.get(regionName) == null) {
+    if(regionToLastFlush.get(regionName) == null) {
       regionToLastFlush.put(regionName, seqNum[0]);
     }

     int counter = 0;
-    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
       Text column = it.next();
       byte[] val = columns.get(column);
       HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum[counter++]);
@@ -333,16 +333,16 @@ public class HLog {

   /** Complete the cache flush */
   public synchronized void completeCacheFlush(Text regionName, Text tableName, long logSeqId) throws IOException {
-    if (closed) {
+    if(closed) {
       return;
     }

-    if (!insideCacheFlush) {
+    if(! insideCacheFlush) {
       throw new IOException("Impossible situation: inside completeCacheFlush(), but 'insideCacheFlush' flag is false");
     }

     writer.append(new HLogKey(regionName, tableName, HLog.METAROW, logSeqId),
         new HLogEdit(HLog.METACOLUMN, HStoreKey.COMPLETE_CACHEFLUSH, System.currentTimeMillis()));
     numEntries++;

     // Remember the most-recent flush for each region.
@@ -353,4 +353,4 @@ public class HLog {
     insideCacheFlush = false;
     notifyAll();
   }
 }
HLogEdit.java
@@ -67,5 +67,4 @@ public class HLogEdit implements Writable {
     this.val.readFields(in);
     this.timestamp = in.readLong();
   }
 }
HLogKey.java
@@ -80,10 +80,10 @@ public class HLogKey implements WritableComparable {
     HLogKey other = (HLogKey) o;
     int result = this.regionName.compareTo(other.regionName);

-    if (result == 0) {
+    if(result == 0) {
       result = this.row.compareTo(other.row);

-      if (result == 0) {
+      if(result == 0) {

         if (this.logSeqNum < other.logSeqNum) {
           result = -1;
@@ -113,5 +113,4 @@ public class HLogKey implements WritableComparable {
     this.row.readFields(in);
     this.logSeqNum = in.readLong();
   }
 }
[File diff suppressed because it is too large.]
HMasterInterface.java
@@ -15,7 +15,8 @@
  */
 package org.apache.hadoop.hbase;

-import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.VersionedProtocol;

 import java.io.IOException;

@@ -23,7 +24,7 @@ import java.io.IOException;
  * Clients interact with the HMasterInterface to gain access to meta-level HBase
  * functionality, like finding an HRegionServer and creating/destroying tables.
  ******************************************************************************/
-public interface HMasterInterface {
+public interface HMasterInterface extends VersionedProtocol {
   public static final long versionID = 1L; // initial version

   //////////////////////////////////////////////////////////////////////////////
HMasterRegionInterface.java
@@ -15,13 +15,15 @@
  */
 package org.apache.hadoop.hbase;

-import java.io.*;
+import java.io.IOException;
+import org.apache.hadoop.ipc.VersionedProtocol;

 /*******************************************************************************
  * HRegionServers interact with the HMasterRegionInterface to report on local
  * goings-on and to obtain data-handling instructions from the HMaster.
  *********************************************/
-public interface HMasterRegionInterface {
-  public static final long versionId = 1L;
+public interface HMasterRegionInterface extends VersionedProtocol {
+  public static final long versionID = 1L;
   public void regionServerStartup(HServerInfo info) throws IOException;
   public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[]) throws IOException;
 }
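Extending VersionedProtocol is what lets Hadoop's RPC layer (the RPC.getProxy and RPC.waitForProxy calls in HClient) check client/server compatibility before dispatching calls. The obligation this places on implementors, sketched below; the method signature is Hadoop's VersionedProtocol of this era and should be treated as an assumption:

// Servers implementing these interfaces must answer the RPC version
// handshake, roughly like this:
public long getProtocolVersion(String protocol, long clientVersion)
    throws IOException {
  return HMasterRegionInterface.versionID;
}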
@@ -22,6 +22,8 @@ import org.apache.commons.logging.LogFactory;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /*******************************************************************************
  * The HMemcache holds in-memory modifications to the HRegion. This is really a
@@ -31,14 +33,14 @@ public class HMemcache {
   private static final Log LOG = LogFactory.getLog(HMemcache.class);
 
   TreeMap<HStoreKey, BytesWritable> memcache
       = new TreeMap<HStoreKey, BytesWritable>();
 
   Vector<TreeMap<HStoreKey, BytesWritable>> history
       = new Vector<TreeMap<HStoreKey, BytesWritable>>();
 
   TreeMap<HStoreKey, BytesWritable> snapshot = null;
 
-  HLocking locking = new HLocking();
+  ReadWriteLock locker = new ReentrantReadWriteLock();
 
   public HMemcache() {
   }
@@ -52,23 +54,27 @@ public class HMemcache {
   }
 
   /**
-   * We want to return a snapshot of the current HMemcache with a known HLog
+   * Returns a snapshot of the current HMemcache with a known HLog
    * sequence number at the same time.
-   *
-   * Return both the frozen HMemcache TreeMap, as well as the HLog seq number.
    *
-   * We need to prevent any writing to the cache during this time, so we obtain
-   * a write lock for the duration of the operation.
+   * We need to prevent any writing to the cache during this time,
+   * so we obtain a write lock for the duration of the operation.
+   *
+   * <p>If this method returns non-null, client must call
+   * {@link #deleteSnapshot()} to clear 'snapshot-in-progress'
+   * state when finished with the returned {@link Snapshot}.
+   *
+   * @return frozen HMemcache TreeMap and HLog sequence number.
    */
   public Snapshot snapshotMemcacheForLog(HLog log) throws IOException {
     Snapshot retval = new Snapshot();
 
-    locking.obtainWriteLock();
+    this.locker.writeLock().lock();
     try {
-      if (snapshot != null) {
+      if(snapshot != null) {
         throw new IOException("Snapshot in progress!");
       }
-      if (memcache.size() == 0) {
+      if(memcache.size() == 0) {
         LOG.debug("memcache empty. Skipping snapshot");
         return retval;
       }
@@ -86,7 +92,7 @@ public class HMemcache {
       return retval;
 
     } finally {
-      locking.releaseWriteLock();
+      this.locker.writeLock().unlock();
     }
   }
 
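The HLocking-to-ReentrantReadWriteLock swap above depends on the lock()/unlock()-in-finally idiom, which guarantees the write lock is released even when snapshotMemcacheForLog() throws. A stripped-down, self-contained sketch of the idiom (generic holder class, not patch code):

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Generic illustration of the locking pattern HMemcache now uses.
    class GuardedState {
      private final ReadWriteLock locker = new ReentrantReadWriteLock();
      private int value = 0;

      void write(int v) {
        locker.writeLock().lock();   // excludes all readers and other writers
        try {
          value = v;                 // an exception here cannot leak the lock
        } finally {
          locker.writeLock().unlock();
        }
      }

      int read() {
        locker.readLock().lock();    // many readers may hold this at once
        try {
          return value;
        } finally {
          locker.readLock().unlock();
        }
      }
    }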
@@ -96,19 +102,19 @@ public class HMemcache {
    * Modifying the structure means we need to obtain a writelock.
    */
   public void deleteSnapshot() throws IOException {
-    locking.obtainWriteLock();
+    this.locker.writeLock().lock();
 
     try {
-      if (snapshot == null) {
+      if(snapshot == null) {
         throw new IOException("Snapshot not present!");
       }
       LOG.debug("deleting snapshot");
 
       for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator();
-          it.hasNext();) {
+          it.hasNext(); ) {
 
         TreeMap<HStoreKey, BytesWritable> cur = it.next();
-        if (snapshot == cur) {
+        if(snapshot == cur) {
           it.remove();
           break;
         }
@@ -118,7 +124,7 @@ public class HMemcache {
       LOG.debug("snapshot deleted");
 
     } finally {
-      locking.releaseWriteLock();
+      this.locker.writeLock().unlock();
     }
   }
 
@@ -128,9 +134,9 @@ public class HMemcache {
    * Operation uses a write lock.
    */
   public void add(Text row, TreeMap<Text, byte[]> columns, long timestamp) {
-    locking.obtainWriteLock();
+    this.locker.writeLock().lock();
     try {
-      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
+      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
         Text column = it.next();
         byte[] val = columns.get(column);
@@ -139,7 +145,7 @@ public class HMemcache {
       }
 
     } finally {
-      locking.releaseWriteLock();
+      this.locker.writeLock().unlock();
     }
   }
 
@@ -150,13 +156,13 @@ public class HMemcache {
    */
   public byte[][] get(HStoreKey key, int numVersions) {
     Vector<byte[]> results = new Vector<byte[]>();
-    locking.obtainReadLock();
+    this.locker.readLock().lock();
     try {
       Vector<byte[]> result = get(memcache, key, numVersions-results.size());
       results.addAll(0, result);
 
       for(int i = history.size()-1; i >= 0; i--) {
-        if (numVersions > 0 && results.size() >= numVersions) {
+        if(numVersions > 0 && results.size() >= numVersions) {
           break;
         }
@@ -164,7 +170,7 @@ public class HMemcache {
         results.addAll(results.size(), result);
       }
 
-      if (results.size() == 0) {
+      if(results.size() == 0) {
         return null;
 
       } else {
@@ -172,7 +178,7 @@ public class HMemcache {
       }
 
     } finally {
-      locking.releaseReadLock();
+      this.locker.readLock().unlock();
     }
   }
 
@@ -184,7 +190,7 @@ public class HMemcache {
    */
   public TreeMap<Text, byte[]> getFull(HStoreKey key) throws IOException {
     TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-    locking.obtainReadLock();
+    this.locker.readLock().lock();
     try {
       internalGetFull(memcache, key, results);
       for(int i = history.size()-1; i >= 0; i--) {
@@ -194,25 +200,25 @@ public class HMemcache {
       return results;
 
     } finally {
-      locking.releaseReadLock();
+      this.locker.readLock().unlock();
     }
   }
 
   void internalGetFull(TreeMap<HStoreKey, BytesWritable> map, HStoreKey key,
       TreeMap<Text, byte[]> results) {
 
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(key);
 
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
       HStoreKey itKey = it.next();
       Text itCol = itKey.getColumn();
 
-      if (results.get(itCol) == null
+      if(results.get(itCol) == null
           && key.matchesWithoutColumn(itKey)) {
         BytesWritable val = tailMap.get(itKey);
         results.put(itCol, val.get());
 
-      } else if (key.getRow().compareTo(itKey.getRow()) > 0) {
+      } else if(key.getRow().compareTo(itKey.getRow()) > 0) {
         break;
       }
     }
@@ -232,15 +238,15 @@ public class HMemcache {
     HStoreKey curKey = new HStoreKey(key.getRow(), key.getColumn(), key.getTimestamp());
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(curKey);
 
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
       HStoreKey itKey = it.next();
 
-      if (itKey.matchesRowCol(curKey)) {
+      if(itKey.matchesRowCol(curKey)) {
         result.add(tailMap.get(itKey).get());
         curKey.setVersion(itKey.getTimestamp() - 1);
       }
 
-      if (numVersions > 0 && result.size() >= numVersions) {
+      if(numVersions > 0 && result.size() >= numVersions) {
         break;
       }
     }
@@ -251,7 +257,7 @@ public class HMemcache {
    * Return a scanner over the keys in the HMemcache
    */
   public HScannerInterface getScanner(long timestamp, Text targetCols[], Text firstRow)
       throws IOException {
 
     return new HMemcacheScanner(timestamp, targetCols, firstRow);
   }
@@ -267,16 +273,16 @@ public class HMemcache {
 
     @SuppressWarnings("unchecked")
     public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
        throws IOException {
 
      super(timestamp, targetCols);
 
-      locking.obtainReadLock();
+      locker.readLock().lock();
      try {
        this.backingMaps = new TreeMap[history.size() + 1];
        int i = 0;
        for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator();
-            it.hasNext();) {
+            it.hasNext(); ) {
 
          backingMaps[i++] = it.next();
        }
@@ -290,7 +296,7 @@ public class HMemcache {
 
       HStoreKey firstKey = new HStoreKey(firstRow);
       for(i = 0; i < backingMaps.length; i++) {
-        if (firstRow.getLength() != 0) {
+        if(firstRow.getLength() != 0) {
           keyIterators[i] = backingMaps[i].tailMap(firstKey).keySet().iterator();
 
         } else {
@@ -298,10 +304,10 @@ public class HMemcache {
         }
 
         while(getNext(i)) {
-          if (!findFirstRow(i, firstRow)) {
+          if(! findFirstRow(i, firstRow)) {
            continue;
          }
-          if (columnMatch(i)) {
+          if(columnMatch(i)) {
            break;
          }
        }
@@ -331,7 +337,7 @@ public class HMemcache {
      * @return - true if there is more data available
      */
     boolean getNext(int i) {
-      if (!keyIterators[i].hasNext()) {
+      if(! keyIterators[i].hasNext()) {
         closeSubScanner(i);
         return false;
       }
@@ -350,16 +356,16 @@ public class HMemcache {
 
     /** Shut down map iterators, and release the lock */
     public void close() throws IOException {
-      if (!scannerClosed) {
+      if(! scannerClosed) {
        try {
          for(int i = 0; i < keys.length; i++) {
-            if (keyIterators[i] != null) {
+            if(keyIterators[i] != null) {
              closeSubScanner(i);
            }
          }
 
        } finally {
-          locking.releaseReadLock();
+          locker.readLock().unlock();
          scannerClosed = true;
        }
      }
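A reading aid for the retrieval methods above: get() consults the live memcache first, then walks history from its newest generation (highest index) backwards, stopping once numVersions results are collected; a numVersions of zero means all versions. A simplified model with plain strings standing in for HStoreKey and BytesWritable (not patch code):

    import java.util.*;

    // Simplified model of HMemcache's newest-first, multi-generation lookup.
    class GenerationalLookup {
      static List<byte[]> get(TreeMap<String, byte[]> memcache,
          List<TreeMap<String, byte[]>> history, String key, int numVersions) {
        List<byte[]> results = new ArrayList<byte[]>();
        byte[] live = memcache.get(key);                 // live generation first
        if (live != null) {
          results.add(live);
        }
        for (int i = history.size() - 1; i >= 0; i--) {  // newest snapshot back
          if (numVersions > 0 && results.size() >= numVersions) {
            break;                                       // collected enough
          }
          byte[] old = history.get(i).get(key);
          if (old != null) {
            results.add(old);
          }
        }
        return results;                                  // empty = no versions
      }
    }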
@@ -67,13 +67,13 @@ public class HMsg implements Writable {
   // Writable
   //////////////////////////////////////////////////////////////////////////////
 
   public void write(DataOutput out) throws IOException {
     out.writeByte(msg);
     info.write(out);
   }
 
   public void readFields(DataInput in) throws IOException {
     this.msg = in.readByte();
     this.info.readFields(in);
   }
 }
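The HMsg pair above follows Hadoop's Writable contract: readFields() must consume fields in exactly the order write() produced them, here the message byte and then the nested HServerInfo. A self-contained round-trip of that ordering rule using primitive stand-ins (illustrative, not patch code):

    import java.io.*;

    // Round-trip demo of the write()/readFields() ordering contract.
    public class WritableOrderDemo {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeByte(3);                // first field, like HMsg's msg byte
        out.writeUTF("localhost:60010"); // second field, standing in for info

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray()));
        byte msg = in.readByte();        // must be read first
        String info = in.readUTF();      // then second, same order as written
        System.out.println(msg + " " + info);
      }
    }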
@@ -61,21 +61,21 @@ public class HRegion implements HConstants {
     // Make sure that srcA comes first; important for key-ordering during
     // write of the merged file.
 
-    if (srcA.getStartKey() == null) {
-      if (srcB.getStartKey() == null) {
+    if(srcA.getStartKey() == null) {
+      if(srcB.getStartKey() == null) {
         throw new IOException("Cannot merge two regions with null start key");
       }
       // A's start key is null but B's isn't. Assume A comes before B
 
-    } else if ((srcB.getStartKey() == null) // A is not null but B is
+    } else if((srcB.getStartKey() == null) // A is not null but B is
         || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
 
       HRegion tmp = srcA;
       srcA = srcB;
       srcB = tmp;
     }
 
-    if (!srcA.getEndKey().equals(srcB.getStartKey())) {
+    if (! srcA.getEndKey().equals(srcB.getStartKey())) {
       throw new IOException("Cannot merge non-adjacent regions");
     }
@@ -89,7 +89,7 @@ public class HRegion implements HConstants {
     Text endKey = srcB.getEndKey();
 
     Path merges = new Path(srcA.getRegionDir(), MERGEDIR);
-    if (!fs.exists(merges)) {
+    if(! fs.exists(merges)) {
       fs.mkdirs(merges);
     }
 
@@ -98,14 +98,14 @@ public class HRegion implements HConstants {
 
     Path newRegionDir = HStoreFile.getHRegionDir(merges, newRegionInfo.regionName);
 
-    if (fs.exists(newRegionDir)) {
+    if(fs.exists(newRegionDir)) {
       throw new IOException("Cannot merge; target file collision at " + newRegionDir);
     }
 
     LOG.info("starting merge of regions: " + srcA.getRegionName() + " and "
         + srcB.getRegionName() + " new region start key is '"
         + (startKey == null ? "" : startKey) + "', end key is '"
         + (endKey == null ? "" : endKey) + "'");
 
     // Flush each of the sources, and merge their files into a single
     // target for each column family.
@@ -114,10 +114,10 @@ public class HRegion implements HConstants {
 
     TreeSet<HStoreFile> alreadyMerged = new TreeSet<HStoreFile>();
     TreeMap<Text, Vector<HStoreFile>> filesToMerge = new TreeMap<Text, Vector<HStoreFile>>();
-    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext(); ) {
       HStoreFile src = it.next();
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if (v == null) {
+      if(v == null) {
         v = new Vector<HStoreFile>();
         filesToMerge.put(src.getColFamily(), v);
       }
@@ -126,10 +126,10 @@ public class HRegion implements HConstants {
 
     LOG.debug("flushing and getting file names for region " + srcB.getRegionName());
 
-    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext(); ) {
       HStoreFile src = it.next();
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if (v == null) {
+      if(v == null) {
         v = new Vector<HStoreFile>();
         filesToMerge.put(src.getColFamily(), v);
       }
@@ -138,11 +138,11 @@ public class HRegion implements HConstants {
 
     LOG.debug("merging stores");
 
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
       Text colFamily = it.next();
       Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
           colFamily, Math.abs(rand.nextLong()));
 
       dst.mergeStoreFiles(srcFiles, fs, conf);
       alreadyMerged.addAll(srcFiles);
@@ -153,15 +153,15 @@ public class HRegion implements HConstants {
     // of any last-minute inserts
 
     LOG.debug("flushing changes since start of merge for region "
         + srcA.getRegionName());
 
     filesToMerge.clear();
-    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext(); ) {
       HStoreFile src = it.next();
 
-      if (!alreadyMerged.contains(src)) {
+      if(! alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if (v == null) {
+        if(v == null) {
           v = new Vector<HStoreFile>();
           filesToMerge.put(src.getColFamily(), v);
         }
@@ -170,14 +170,14 @@ public class HRegion implements HConstants {
     }
 
     LOG.debug("flushing changes since start of merge for region "
         + srcB.getRegionName());
 
-    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext(); ) {
       HStoreFile src = it.next();
 
-      if (!alreadyMerged.contains(src)) {
+      if(! alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if (v == null) {
+        if(v == null) {
           v = new Vector<HStoreFile>();
           filesToMerge.put(src.getColFamily(), v);
         }
@@ -187,11 +187,11 @@ public class HRegion implements HConstants {
 
     LOG.debug("merging changes since start of merge");
 
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
       Text colFamily = it.next();
       Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
           colFamily, Math.abs(rand.nextLong()));
 
       dst.mergeStoreFiles(srcFiles, fs, conf);
     }
@@ -199,7 +199,7 @@ public class HRegion implements HConstants {
     // Done
 
     HRegion dstRegion = new HRegion(dir, log, fs, conf, newRegionInfo,
         newRegionDir, null);
 
     // Get rid of merges directory
 
@@ -284,7 +284,7 @@ public class HRegion implements HConstants {
    * written-to before), then read it from the supplied path.
    */
   public HRegion(Path dir, HLog log, FileSystem fs, Configuration conf,
       HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
 
     this.dir = dir;
     this.log = log;
@@ -303,29 +303,29 @@ public class HRegion implements HConstants {
 
     // Move prefab HStore files into place (if any)
 
-    if (initialFiles != null && fs.exists(initialFiles)) {
+    if(initialFiles != null && fs.exists(initialFiles)) {
       fs.rename(initialFiles, regiondir);
     }
 
     // Load in all the HStores.
 
     for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext();) {
+        it.hasNext(); ) {
 
-      Text colFamily = it.next();
+      Text colFamily = HStoreKey.extractFamily(it.next());
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName, colFamily,
           this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
     }
 
     // Get rid of any splits or merges that were lost in-progress
 
     Path splits = new Path(regiondir, SPLITDIR);
-    if (fs.exists(splits)) {
+    if(fs.exists(splits)) {
       fs.delete(splits);
     }
 
     Path merges = new Path(regiondir, MERGEDIR);
-    if (fs.exists(merges)) {
+    if(fs.exists(merges)) {
       fs.delete(merges);
     }
 
@@ -345,6 +345,7 @@ public class HRegion implements HConstants {
 
   /** Closes and deletes this HRegion. Called when doing a table deletion, for example */
   public void closeAndDelete() throws IOException {
+    LOG.info("deleting region: " + regionInfo.regionName);
     close();
     fs.delete(regiondir);
   }
@@ -362,7 +363,7 @@ public class HRegion implements HConstants {
   public Vector<HStoreFile> close() throws IOException {
     boolean shouldClose = false;
     synchronized(writestate) {
-      if (writestate.closed) {
+      if(writestate.closed) {
         LOG.info("region " + this.regionInfo.regionName + " closed");
         return new Vector<HStoreFile>();
       }
@@ -376,13 +377,13 @@ public class HRegion implements HConstants {
       shouldClose = true;
     }
 
-    if (!shouldClose) {
+    if(! shouldClose) {
       return null;
 
     } else {
       LOG.info("closing region " + this.regionInfo.regionName);
       Vector<HStoreFile> allHStoreFiles = internalFlushcache();
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
         HStore store = it.next();
         store.close();
       }
@@ -406,8 +407,8 @@ public class HRegion implements HConstants {
    * Returns two brand-new (and open) HRegions
    */
   public HRegion[] closeAndSplit(Text midKey) throws IOException {
-    if (((regionInfo.startKey.getLength() != 0)
+    if(((regionInfo.startKey.getLength() != 0)
         && (regionInfo.startKey.compareTo(midKey) > 0))
         || ((regionInfo.endKey.getLength() != 0)
         && (regionInfo.endKey.compareTo(midKey) < 0))) {
       throw new IOException("Region splitkey must lie within region boundaries.");
@@ -419,13 +420,13 @@ public class HRegion implements HConstants {
     // or compactions until close() is called.
 
     Path splits = new Path(regiondir, SPLITDIR);
-    if (!fs.exists(splits)) {
+    if(! fs.exists(splits)) {
       fs.mkdirs(splits);
     }
 
     long regionAId = Math.abs(rand.nextLong());
     HRegionInfo regionAInfo = new HRegionInfo(regionAId, regionInfo.tableDesc,
         regionInfo.startKey, midKey);
 
     long regionBId = Math.abs(rand.nextLong());
     HRegionInfo regionBInfo
@@ -434,24 +435,24 @@ public class HRegion implements HConstants {
     Path dirA = HStoreFile.getHRegionDir(splits, regionAInfo.regionName);
     Path dirB = HStoreFile.getHRegionDir(splits, regionBInfo.regionName);
 
-    if (fs.exists(dirA) || fs.exists(dirB)) {
+    if(fs.exists(dirA) || fs.exists(dirB)) {
       throw new IOException("Cannot split; target file collision at " + dirA
           + " or " + dirB);
     }
 
     TreeSet<HStoreFile> alreadySplit = new TreeSet<HStoreFile>();
     Vector<HStoreFile> hstoreFilesToSplit = flushcache(true);
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
 
       LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
           + "/" + hsf.fileId());
 
       HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName,
           hsf.getColFamily(), Math.abs(rand.nextLong()));
 
       HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName,
          hsf.getColFamily(), Math.abs(rand.nextLong()));
 
       hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       alreadySplit.add(hsf);
@@ -461,18 +462,18 @@ public class HRegion implements HConstants {
     // and copy the small remainder
 
     hstoreFilesToSplit = close();
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
 
-      if (!alreadySplit.contains(hsf)) {
+      if(! alreadySplit.contains(hsf)) {
         LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
             + "/" + hsf.fileId());
 
         HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName,
             hsf.getColFamily(), Math.abs(rand.nextLong()));
 
         HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName,
            hsf.getColFamily(), Math.abs(rand.nextLong()));
 
         hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       }
@@ -494,7 +495,7 @@ public class HRegion implements HConstants {
     regions[1] = regionB;
 
     LOG.info("region split complete. new regions are: " + regions[0].getRegionName()
         + ", " + regions[1].getRegionName());
 
     return regions;
   }
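One detail of closeAndSplit() worth isolating: the opening guard treats a zero-length startKey or endKey as unbounded on that side, the same convention checkRow() applies later in this file. The guard extracted into a standalone helper for illustration (shape taken from the diff; the helper itself is not patch code):

    import java.io.IOException;
    import org.apache.hadoop.io.Text;

    // midKey must fall inside [startKey, endKey); empty key = unbounded side.
    static void checkSplitKey(Text startKey, Text endKey, Text midKey)
        throws IOException {
      if (((startKey.getLength() != 0) && (startKey.compareTo(midKey) > 0))
          || ((endKey.getLength() != 0) && (endKey.compareTo(midKey) < 0))) {
        throw new IOException("Region splitkey must lie within region boundaries.");
      }
    }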
@@ -565,10 +566,10 @@ public class HRegion implements HConstants {
     Text key = new Text();
     long maxSize = 0;
 
-    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext();) {
+    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
       long size = i.next().getLargestFileSize(key);
 
-      if (size > maxSize) { // Largest so far
+      if(size > maxSize) { // Largest so far
         maxSize = size;
         midKey.set(key);
       }
@@ -593,9 +594,9 @@ public class HRegion implements HConstants {
   public boolean compactStores() throws IOException {
     boolean shouldCompact = false;
     synchronized(writestate) {
-      if ((!writestate.writesOngoing)
+      if((! writestate.writesOngoing)
           && writestate.writesEnabled
-          && (!writestate.closed)
+          && (! writestate.closed)
           && recentCommits > MIN_COMMITS_FOR_COMPACTION) {
 
         writestate.writesOngoing = true;
@@ -603,14 +604,14 @@ public class HRegion implements HConstants {
       }
     }
 
-    if (!shouldCompact) {
+    if(! shouldCompact) {
       LOG.info("not compacting region " + this.regionInfo.regionName);
       return false;
 
     } else {
       try {
         LOG.info("starting compaction on region " + this.regionInfo.regionName);
-        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
+        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
           HStore store = it.next();
           store.compact();
         }
@@ -632,7 +633,7 @@ public class HRegion implements HConstants {
    * only take if there have been a lot of uncommitted writes.
    */
   public void optionallyFlush() throws IOException {
-    if (commitsSinceFlush > maxUnflushedEntries) {
+    if(commitsSinceFlush > maxUnflushedEntries) {
       flushcache(false);
     }
   }
@@ -657,20 +658,20 @@ public class HRegion implements HConstants {
   public Vector<HStoreFile> flushcache(boolean disableFutureWrites) throws IOException {
     boolean shouldFlush = false;
     synchronized(writestate) {
-      if ((!writestate.writesOngoing)
+      if((! writestate.writesOngoing)
          && writestate.writesEnabled
-          && (!writestate.closed)) {
+          && (! writestate.closed)) {
 
         writestate.writesOngoing = true;
         shouldFlush = true;
 
-        if (disableFutureWrites) {
+        if(disableFutureWrites) {
           writestate.writesEnabled = false;
         }
       }
     }
 
-    if (!shouldFlush) {
+    if(! shouldFlush) {
       LOG.debug("not flushing cache for region " + this.regionInfo.regionName);
       return null;
 
@@ -731,8 +732,8 @@ public class HRegion implements HConstants {
 
     HMemcache.Snapshot retval = memcache.snapshotMemcacheForLog(log);
     TreeMap<HStoreKey, BytesWritable> memcacheSnapshot = retval.memcacheSnapshot;
-    if (memcacheSnapshot == null) {
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
+    if(memcacheSnapshot == null) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
         HStore hstore = it.next();
         Vector<HStoreFile> hstoreFiles = hstore.getAllMapFiles();
         allHStoreFiles.addAll(0, hstoreFiles);
@@ -746,7 +747,7 @@ public class HRegion implements HConstants {
 
     LOG.debug("flushing memcache to HStores");
 
-    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
+    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
       HStore hstore = it.next();
       Vector<HStoreFile> hstoreFiles
          = hstore.flushCache(memcacheSnapshot, logCacheFlushId);
@@ -762,7 +763,7 @@ public class HRegion implements HConstants {
     LOG.debug("writing flush cache complete to log");
 
     log.completeCacheFlush(this.regionInfo.regionName,
         regionInfo.tableDesc.getName(), logCacheFlushId);
 
     // C. Delete the now-irrelevant memcache snapshot; its contents have been
     // dumped to disk-based HStores.
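The flush path above has a strict ordering: (A) freeze the memcache together with an HLog sequence number, (B) flush each HStore from that frozen snapshot and write the flush-complete marker to the log, then (C) delete the snapshot. Compressed into one block as an ordering sketch (names taken from the diff; the enclosing HRegion context and the exact source of logCacheFlushId are assumptions):

    // Ordering sketch of internalFlushcache(); not a drop-in replacement.
    HMemcache.Snapshot retval = memcache.snapshotMemcacheForLog(log); // A. freeze
    if (retval.memcacheSnapshot != null) {
      for (Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
        it.next().flushCache(retval.memcacheSnapshot, logCacheFlushId); // B. write
      }
      log.completeCacheFlush(regionInfo.regionName,
          regionInfo.tableDesc.getName(), logCacheFlushId); // durable marker
      memcache.deleteSnapshot(); // C. clear 'snapshot-in-progress' state
    }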
@@ -784,7 +785,7 @@ public class HRegion implements HConstants {
   /** Fetch a single data item. */
   public byte[] get(Text row, Text column) throws IOException {
     byte results[][] = get(row, column, Long.MAX_VALUE, 1);
-    if (results == null) {
+    if(results == null) {
       return null;
 
     } else {
@@ -799,17 +800,16 @@ public class HRegion implements HConstants {
 
   /** Fetch multiple versions of a single data item, with timestamp. */
   public byte[][] get(Text row, Text column, long timestamp, int numVersions)
       throws IOException {
 
-    if (writestate.closed) {
+    if(writestate.closed) {
       throw new IOException("HRegion is closed.");
     }
 
     // Make sure this is a valid row and valid column
 
     checkRow(row);
-    Text colFamily = HStoreKey.extractFamily(column);
-    checkFamily(colFamily);
+    checkColumn(column);
 
     // Obtain the row-lock
 
@@ -830,7 +830,7 @@ public class HRegion implements HConstants {
     // Check the memcache
 
     byte[][] result = memcache.get(key, numVersions);
-    if (result != null) {
+    if(result != null) {
       return result;
     }
 
@@ -838,7 +838,7 @@ public class HRegion implements HConstants {
 
     Text colFamily = HStoreKey.extractFamily(key.getColumn());
     HStore targetStore = stores.get(colFamily);
-    if (targetStore == null) {
+    if(targetStore == null) {
       return null;
     }
 
@@ -859,7 +859,7 @@ public class HRegion implements HConstants {
     HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
 
     TreeMap<Text, byte[]> memResult = memcache.getFull(key);
-    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) {
       Text colFamily = it.next();
       HStore targetStore = stores.get(colFamily);
       targetStore.getFull(key, memResult);
@@ -879,7 +879,7 @@ public class HRegion implements HConstants {
 
     HStore storelist[] = new HStore[families.size()];
     int i = 0;
-    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
       Text family = it.next();
       storelist[i++] = stores.get(family);
     }
@@ -911,23 +911,23 @@ public class HRegion implements HConstants {
   /**
    * Put a cell value into the locked row. The user indicates the row-lock, the
    * target column, and the desired value. This stuff is set into a temporary
-   * memory area until the user commits the change, at which pointit's logged
+   * memory area until the user commits the change, at which point it's logged
    * and placed into the memcache.
    *
    * This method really just tests the input, then calls an internal localput()
    * method.
    */
   public void put(long lockid, Text targetCol, byte[] val) throws IOException {
-    if (val.length == HStoreKey.DELETE_BYTES.length) {
+    if(val.length == HStoreKey.DELETE_BYTES.length) {
       boolean matches = true;
       for(int i = 0; i < val.length; i++) {
-        if (val[i] != HStoreKey.DELETE_BYTES[i]) {
+        if(val[i] != HStoreKey.DELETE_BYTES[i]) {
          matches = false;
          break;
        }
      }
 
-      if (matches) {
+      if(matches) {
        throw new IOException("Cannot insert value: " + val);
      }
    }
@@ -950,9 +950,11 @@ public class HRegion implements HConstants {
    * (Or until the user's write-lock expires.)
    */
   void localput(long lockid, Text targetCol, byte[] val) throws IOException {
+    checkColumn(targetCol);
+
     Text row = getRowFromLock(lockid);
-    if (row == null) {
-      throw new IOException("No write lock for lockid " + lockid);
+    if(row == null) {
+      throw new LockException("No write lock for lockid " + lockid);
     }
 
     // This sync block makes localput() thread-safe when multiple
@@ -964,13 +966,13 @@ public class HRegion implements HConstants {
     // This check makes sure that another thread from the client
     // hasn't aborted/committed the write-operation.
 
-    if (row != getRowFromLock(lockid)) {
-      throw new IOException("Locking error: put operation on lock " + lockid
+    if(row != getRowFromLock(lockid)) {
+      throw new LockException("Locking error: put operation on lock " + lockid
          + " unexpected aborted by another thread");
     }
 
     TreeMap<Text, byte[]> targets = targetColumns.get(lockid);
-    if (targets == null) {
+    if(targets == null) {
       targets = new TreeMap<Text, byte[]>();
       targetColumns.put(lockid, targets);
     }
@@ -985,8 +987,8 @@ public class HRegion implements HConstants {
    */
   public void abort(long lockid) throws IOException {
     Text row = getRowFromLock(lockid);
-    if (row == null) {
-      throw new IOException("No write lock for lockid " + lockid);
+    if(row == null) {
+      throw new LockException("No write lock for lockid " + lockid);
     }
 
     // This sync block makes abort() thread-safe when multiple
@@ -998,9 +1000,9 @@ public class HRegion implements HConstants {
     // This check makes sure another thread from the client
     // hasn't aborted/committed the write-operation.
 
-    if (row != getRowFromLock(lockid)) {
-      throw new IOException("Locking error: abort() operation on lock "
+    if(row != getRowFromLock(lockid)) {
+      throw new LockException("Locking error: abort() operation on lock "
           + lockid + " unexpected aborted by another thread");
     }
 
     targetColumns.remove(lockid);
@@ -1021,8 +1023,8 @@ public class HRegion implements HConstants {
     // that repeated executions won't screw this up.
 
     Text row = getRowFromLock(lockid);
-    if (row == null) {
-      throw new IOException("No write lock for lockid " + lockid);
+    if(row == null) {
+      throw new LockException("No write lock for lockid " + lockid);
    }
 
     // This check makes sure that another thread from the client
@@ -1035,7 +1037,7 @@ public class HRegion implements HConstants {
 
     long commitTimestamp = System.currentTimeMillis();
     log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row,
         targetColumns.get(lockid), commitTimestamp);
 
     memcache.add(row, targetColumns.get(lockid), commitTimestamp);
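Taken together, the hunks above describe a three-phase row-update protocol: obtainLock(row) hands back a lockid, localput() stages cells for that lockid in targetColumns (and now validates the column first), and commit() appends the staged cells to the HLog before memcache.add(); abort() discards them. Lock misuse now surfaces as LockException rather than a bare IOException. A caller-side sketch, with the single-argument commit(lockid) signature assumed from context:

    // Caller-side sketch of the lock/stage/commit protocol; 'region' is an
    // HRegion, and "info:name" is a hypothetical column.
    long lockid = region.obtainLock(row);             // phase 1: take the row lock
    try {
      region.put(lockid, new Text("info:name"), val); // phase 2: stage the cell
      region.commit(lockid);                          // phase 3: log, then memcache
    } catch (IOException e) {
      region.abort(lockid);                           // discard the staged cells
      throw e;
    }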
@@ -1054,25 +1056,26 @@ public class HRegion implements HConstants {
 
   /** Make sure this is a valid row for the HRegion */
   void checkRow(Text row) throws IOException {
-    if (((regionInfo.startKey.getLength() == 0)
+    if(((regionInfo.startKey.getLength() == 0)
         || (regionInfo.startKey.compareTo(row) <= 0))
         && ((regionInfo.endKey.getLength() == 0)
         || (regionInfo.endKey.compareTo(row) > 0))) {
       // all's well
 
     } else {
       throw new IOException("Requested row out of range for HRegion "
           + regionInfo.regionName + ", startKey='" + regionInfo.startKey
           + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
     }
   }
 
   /** Make sure this is a valid column for the current table */
-  void checkFamily(Text family) throws IOException {
-    if (!regionInfo.tableDesc.hasFamily(family)) {
+  void checkColumn(Text columnName) throws IOException {
+    Text family = new Text(HStoreKey.extractFamily(columnName) + ":");
+    if(! regionInfo.tableDesc.hasFamily(family)) {
       throw new IOException("Requested column family " + family
           + " does not exist in HRegion " + regionInfo.regionName
           + " for table " + regionInfo.tableDesc.getName());
     }
   }
 
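The checkFamily()-to-checkColumn() rewrite above encodes the column-naming convention: a column name is family:qualifier, HStoreKey.extractFamily() returns the family without the ':', and checkColumn() re-appends it because the table descriptor keys families with a trailing ':'. That convention restated over plain strings (stand-in helper, not the HStoreKey code):

    // Given "family:qualifier", produce the descriptor-facing key "family:".
    static String familyKey(String columnName) {
      int idx = columnName.indexOf(':');
      if (idx < 0) {
        throw new IllegalArgumentException(
            "Column name has no family indicator: " + columnName);
      }
      return columnName.substring(0, idx) + ":"; // "info:" from "info:name"
    }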
@@ -1092,6 +1095,8 @@ public class HRegion implements HConstants {
    * which maybe we'll do in the future.
    */
   long obtainLock(Text row) throws IOException {
+    checkRow(row);
+
     synchronized(rowsToLocks) {
       while(rowsToLocks.get(row) != null) {
         try {
@@ -1109,6 +1114,8 @@ public class HRegion implements HConstants {
   }
 
   Text getRowFromLock(long lockid) throws IOException {
+    // Pattern is that all access to rowsToLocks and/or to
+    // locksToRows is via a lock on rowsToLocks.
     synchronized(rowsToLocks) {
       return locksToRows.get(lockid);
     }
@@ -1150,7 +1157,7 @@ public class HRegion implements HConstants {
         keys[i] = new HStoreKey();
         resultSets[i] = new TreeMap<Text, byte[]>();
 
-        if (!scanners[i].next(keys[i], resultSets[i])) {
+        if(! scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);
         }
       }
@@ -1167,7 +1174,7 @@ public class HRegion implements HConstants {
       Text chosenRow = null;
       long chosenTimestamp = -1;
       for(int i = 0; i < keys.length; i++) {
-        if (scanners[i] != null
+        if(scanners[i] != null
            && (chosenRow == null
            || (keys[i].getRow().compareTo(chosenRow) < 0)
            || ((keys[i].getRow().compareTo(chosenRow) == 0)
@@ -1181,21 +1188,21 @@ public class HRegion implements HConstants {
       // Store the key and results for each sub-scanner. Merge them as appropriate.
 
       boolean insertedItem = false;
-      if (chosenTimestamp > 0) {
+      if(chosenTimestamp > 0) {
         key.setRow(chosenRow);
         key.setVersion(chosenTimestamp);
         key.setColumn(new Text(""));
 
         for(int i = 0; i < scanners.length; i++) {
           while((scanners[i] != null)
               && (keys[i].getRow().compareTo(chosenRow) == 0)
               && (keys[i].getTimestamp() == chosenTimestamp)) {
 
             results.putAll(resultSets[i]);
             insertedItem = true;
 
             resultSets[i].clear();
-            if (!scanners[i].next(keys[i], resultSets[i])) {
+            if(! scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
             }
           }
@@ -1204,10 +1211,10 @@ public class HRegion implements HConstants {
           // row label, then its timestamp is bad. We need to advance it.
 
           while((scanners[i] != null)
               && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
 
             resultSets[i].clear();
-            if (!scanners[i].next(keys[i], resultSets[i])) {
+            if(! scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
             }
           }
@@ -1231,7 +1238,7 @@ public class HRegion implements HConstants {
     /** All done with the scanner. */
     public void close() throws IOException {
       for(int i = 0; i < scanners.length; i++) {
-        if (scanners[i] != null) {
+        if(scanners[i] != null) {
          closeScanner(i);
        }
      }
@@ -38,28 +38,28 @@ public class HRegionInfo implements Writable {
   }

   public HRegionInfo(long regionId, HTableDescriptor tableDesc, Text startKey,
       Text endKey) throws IllegalArgumentException {

     this.regionId = regionId;

-    if (tableDesc == null) {
+    if(tableDesc == null) {
       throw new IllegalArgumentException("tableDesc cannot be null");
     }

     this.tableDesc = tableDesc;

     this.startKey = new Text();
-    if (startKey != null) {
+    if(startKey != null) {
       this.startKey.set(startKey);
     }

     this.endKey = new Text();
-    if (endKey != null) {
+    if(endKey != null) {
       this.endKey.set(endKey);
     }

     this.regionName = new Text(tableDesc.getName() + "_"
         + (startKey == null ? "" : startKey.toString()) + "_" + regionId);
   }

   //////////////////////////////////////////////////////////////////////////////
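A quick worked example of the regionName rule in the constructor above (the table name, start key, and region id are assumed values, not taken from the patch):

    // tableDesc.getName() = "test", startKey = "abc", regionId = 17
    //   -> regionName = "test_abc_17"
    // with a null startKey the middle component is empty -> "test__17"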
@@ -15,7 +15,9 @@
  */
 package org.apache.hadoop.hbase;

-import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.VersionedProtocol;

 import java.io.*;

@@ -23,17 +25,13 @@ import java.io.*;
  * Clients interact with HRegionServers using
  * a handle to the HRegionInterface.
  ******************************************************************************/
-public interface HRegionInterface {
+public interface HRegionInterface extends VersionedProtocol {
   public static final long versionID = 1L; // initial version

   // Get metainfo about an HRegion

   public HRegionInfo getRegionInfo(Text regionName);

-  // Start a scanner for a given HRegion.
-
-  public HScannerInterface openScanner(Text regionName, Text[] columns, Text startRow) throws IOException;
-
   // GET methods for an HRegion.

   public BytesWritable get(Text regionName, Text row, Text column) throws IOException;

@@ -58,4 +56,41 @@ public interface HRegionInterface {
   public void abort(Text regionName, long clientid, long lockid) throws IOException;
   public void commit(Text regionName, long clientid, long lockid) throws IOException;
   public void renewLease(long lockid, long clientid) throws IOException;
+
+  //////////////////////////////////////////////////////////////////////////////
+  // remote scanner interface
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Opens a remote scanner.
+   *
+   * @param clientId - client identifier (so we can associate a scanner with a client)
+   * @param regionName - name of region to scan
+   * @param columns - columns to scan
+   * @param startRow - starting row to scan
+   *
+   * @param scannerId - scanner identifier used in other calls
+   * @throws IOException
+   */
+  public long openScanner(Text regionName, Text[] columns, Text startRow) throws IOException;
+
+  /**
+   * Get the next set of values
+   *
+   * @param scannerId - clientId passed to openScanner
+   * @param key - the next HStoreKey
+   * @param columns - an array of column names
+   * @param values - an array of byte[] values (corresponds 1-1 with columns)
+   * @return - true if a value was retrieved
+   * @throws IOException
+   */
+  public LabelledData[] next(long scannerId, HStoreKey key) throws IOException;
+
+  /**
+   * Close a scanner
+   *
+   * @param scannerId - the scanner id returned by openScanner
+   * @throws IOException
+   */
+  public void close(long scannerId) throws IOException;
 }
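To make the new remote scanner contract concrete, a client would be expected to drive openScanner/next/close roughly as follows. This is a hedged sketch, not part of the patch: 'server' stands for an HRegionInterface proxy obtained over RPC, the region and column names are invented, and a no-arg HStoreKey constructor is assumed. Judging from the server-side implementation later in this patch, an empty array from next() signals that the scan is exhausted.

    Text regionName = new Text("mytable_startkey_0");   // assumed region name
    Text[] columns = { new Text("contents:") };         // assumed column family
    long scannerId = server.openScanner(regionName, columns, new Text(""));
    try {
      HStoreKey key = new HStoreKey();                  // filled in by each next() call
      LabelledData[] values = server.next(scannerId, key);
      while(values.length > 0) {
        // each LabelledData pairs a column label with its value for key.getRow()
        values = server.next(scannerId, key);
      }
    } finally {
      server.close(scannerId);                          // releases the scanner's server-side lease
    }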
@@ -15,6 +15,8 @@
  */
 package org.apache.hadoop.hbase;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.ipc.*;

@@ -22,19 +24,34 @@ import org.apache.hadoop.conf.*;

 import java.io.*;
 import java.util.*;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;

 /*******************************************************************************
  * HRegionServer makes a set of HRegions available to clients. It checks in with
  * the HMaster. There are many HRegionServers in a single HBase deployment.
  ******************************************************************************/
-public class HRegionServer implements HConstants, HRegionInterface, Runnable {
+public class HRegionServer
+    implements HConstants, HRegionInterface, Runnable {
+
+  public long getProtocolVersion(String protocol,
+      long clientVersion) throws IOException {
+    if (protocol.equals(HRegionInterface.class.getName())) {
+      return HRegionInterface.versionID;
+    } else {
+      throw new IOException("Unknown protocol to name node: " + protocol);
+    }
+  }
+
+  private static final Log LOG = LogFactory.getLog(HRegionServer.class);
+
   private boolean stopRequested;
   private Path regionDir;
   private HServerAddress address;
   private Configuration conf;
   private Random rand;
   private TreeMap<Text, HRegion> regions; // region name -> HRegion
-  private HLocking locking;
+  private ReadWriteLock locker;
   private Vector<HMsg> outboundMsgs;

   private long threadWakeFrequency;
@@ -61,29 +78,29 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }

   public void run() {
-    while(!stopRequested) {
+    while(! stopRequested) {
       long startTime = System.currentTimeMillis();

       // Grab a list of regions to check

       Vector<HRegion> checkSplit = new Vector<HRegion>();
-      locking.obtainReadLock();
+      locker.readLock().lock();
       try {
         checkSplit.addAll(regions.values());

       } finally {
-        locking.releaseReadLock();
+        locker.readLock().unlock();
       }

       // Check to see if they need splitting

       Vector<SplitRegion> toSplit = new Vector<SplitRegion>();
-      for(Iterator<HRegion> it = checkSplit.iterator(); it.hasNext();) {
+      for(Iterator<HRegion> it = checkSplit.iterator(); it.hasNext(); ) {
         HRegion cur = it.next();
         Text midKey = new Text();

         try {
-          if (cur.needsSplit(midKey)) {
+          if(cur.needsSplit(midKey)) {
             toSplit.add(new SplitRegion(cur, midKey));
           }

@@ -92,17 +109,19 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         }
       }

-      for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext();) {
+      for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext(); ) {
         SplitRegion r = it.next();

-        locking.obtainWriteLock();
+        locker.writeLock().lock();
         regions.remove(r.region.getRegionName());
-        locking.releaseWriteLock();
+        locker.writeLock().unlock();

         HRegion[] newRegions = null;
         try {
           Text oldRegion = r.region.getRegionName();

+          LOG.info("splitting region: " + oldRegion);
+
           newRegions = r.region.closeAndSplit(r.midKey);

           // When a region is split, the META table needs to updated if we're
@@ -111,8 +130,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

           Text tableToUpdate
             = (oldRegion.find(META_TABLE_NAME.toString()) == 0)
               ? ROOT_TABLE_NAME : META_TABLE_NAME;

+          LOG.debug("region split complete. updating meta");
+
           client.openTable(tableToUpdate);
           long lockid = client.startUpdate(oldRegion);
           client.delete(lockid, META_COL_REGIONINFO);
@@ -132,7 +153,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

           // Now tell the master about the new regions

+          LOG.debug("reporting region split to master");
+
           reportSplit(newRegions[0].getRegionInfo(), newRegions[1].getRegionInfo());
+
+          LOG.info("region split successful. old region=" + oldRegion
+              + ", new regions: " + newRegions[0].getRegionName() + ", "
+              + newRegions[1].getRegionName());

           newRegions[0].close();
           newRegions[1].close();

@@ -145,11 +173,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

       // Sleep

-      long endTime = System.currentTimeMillis();
-      try {
-        Thread.sleep(splitCheckFrequency - (endTime - startTime));
-
-      } catch(InterruptedException iex) {
+      long waitTime =
+        splitCheckFrequency - (System.currentTimeMillis() - startTime);
+
+      if(waitTime > 0) {
+        try {
+          Thread.sleep(waitTime);
+
+        } catch(InterruptedException iex) {
+        }
       }
     }
   }
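The sleep rewrite in this hunk (and the matching ones below) fixes a latent bug as well as tidying the code: if a pass through the loop takes longer than splitCheckFrequency, the old endTime arithmetic hands Thread.sleep a negative duration, which throws IllegalArgumentException. A minimal self-contained sketch of the corrected pattern, with an assumed one-second period:

    public class GuardedSleepSketch {
      public static void main(String[] args) {
        final long periodMillis = 1000;                // assumed wake frequency
        long startTime = System.currentTimeMillis();
        // ... the loop body would do its work here ...
        long waitTime = periodMillis - (System.currentTimeMillis() - startTime);
        if (waitTime > 0) {                            // skip the sleep entirely when overdue
          try {
            Thread.sleep(waitTime);
          } catch (InterruptedException iex) {
            // ignored, as in the server threads: the loop re-checks its stop flag
          }
        }
      }
    }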
@@ -161,23 +193,23 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private Thread cacheFlusherThread;
   private class Flusher implements Runnable {
     public void run() {
-      while(!stopRequested) {
+      while(! stopRequested) {
         long startTime = System.currentTimeMillis();

         // Grab a list of items to flush

         Vector<HRegion> toFlush = new Vector<HRegion>();
-        locking.obtainReadLock();
+        locker.readLock().lock();
         try {
           toFlush.addAll(regions.values());

         } finally {
-          locking.releaseReadLock();
+          locker.readLock().unlock();
         }

         // Flush them, if necessary

-        for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext();) {
+        for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext(); ) {
           HRegion cur = it.next();

           try {
@@ -190,11 +222,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

         // Sleep

-        long endTime = System.currentTimeMillis();
-        try {
-          Thread.sleep(threadWakeFrequency - (endTime - startTime));
-
-        } catch(InterruptedException iex) {
+        long waitTime =
+          threadWakeFrequency - (System.currentTimeMillis() - startTime);
+
+        if(waitTime > 0) {
+          try {
+            Thread.sleep(waitTime);
+
+          } catch(InterruptedException iex) {
+          }
         }
       }
     }
   }
@@ -212,12 +248,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private Thread logRollerThread;
   private class LogRoller implements Runnable {
     public void run() {
-      while(!stopRequested) {
+      while(! stopRequested) {

         // If the number of log entries is high enough, roll the log. This is a
         // very fast operation, but should not be done too frequently.

-        if (log.getNumEntries() > maxLogEntries) {
+        if(log.getNumEntries() > maxLogEntries) {
           try {
             log.rollWriter();

@@ -249,24 +285,24 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Start a HRegionServer at the default location */
   public HRegionServer(Configuration conf) throws IOException {
     this(new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR)),
-        new HServerAddress(conf.get("hbase.regionserver.default.name")),
+        new HServerAddress(conf.get(REGIONSERVER_ADDRESS, "localhost:0")),
         conf);
   }

   /** Start a HRegionServer at an indicated location */
   public HRegionServer(Path regionDir, HServerAddress address, Configuration conf)
       throws IOException {

     // Basic setup

     this.stopRequested = false;
     this.regionDir = regionDir;
-    this.address = address;
     this.conf = conf;
     this.rand = new Random();
     this.regions = new TreeMap<Text, HRegion>();
-    this.locking = new HLocking();
+    this.locker = new ReentrantReadWriteLock();
     this.outboundMsgs = new Vector<HMsg>();
+    this.scanners = Collections.synchronizedMap(new TreeMap<Text, HScannerInterface>());

     // Config'ed params

@@ -278,53 +314,69 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     // Cache flushing

     this.cacheFlusher = new Flusher();
-    this.cacheFlusherThread = new Thread(cacheFlusher);
+    this.cacheFlusherThread = new Thread(cacheFlusher, "HRegionServer.cacheFlusher");

     // Check regions to see if they need to be split

     this.splitChecker = new SplitChecker();
-    this.splitCheckerThread = new Thread(splitChecker);
+    this.splitCheckerThread = new Thread(splitChecker, "HRegionServer.splitChecker");

+    // Process requests from Master
+
+    this.toDo = new Vector<HMsg>();
+    this.worker = new Worker();
+    this.workerThread = new Thread(worker, "HRegionServer.worker");
+
     try {

+      // Server to handle client requests
+
+      this.server = RPC.getServer(this, address.getBindAddress().toString(),
+          address.getPort(), conf.getInt("hbase.hregionserver.handler.count", 10), false, conf);
+
+      this.address = new HServerAddress(server.getListenerAddress());
+
       // Local file paths

-      this.fs = FileSystem.get(conf);
-      Path newlogdir = new Path(regionDir, "log" + "_" + address.toString());
-      this.oldlogfile = new Path(regionDir, "oldlogfile" + "_" + address.toString());
+      String serverName = this.address.getBindAddress() + "_" + this.address.getPort();
+      Path newlogdir = new Path(regionDir, "log" + "_" + serverName);
+      this.oldlogfile = new Path(regionDir, "oldlogfile" + "_" + serverName);

       // Logging

+      this.fs = FileSystem.get(conf);
       HLog.consolidateOldLog(newlogdir, oldlogfile, fs, conf);
       this.log = new HLog(fs, newlogdir, conf);
       this.logRoller = new LogRoller();
-      this.logRollerThread = new Thread(logRoller);
+      this.logRollerThread = new Thread(logRoller, "HRegionServer.logRoller");

       // Remote HMaster

       this.hbaseMaster = (HMasterRegionInterface)
         RPC.waitForProxy(HMasterRegionInterface.class,
-            HMasterRegionInterface.versionId,
-            new HServerAddress(conf.get(MASTER_DEFAULT_NAME)).getInetSocketAddress(),
+            HMasterRegionInterface.versionID,
+            new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
             conf);

       // Threads

+      this.workerThread.start();
       this.cacheFlusherThread.start();
       this.splitCheckerThread.start();
       this.logRollerThread.start();
       this.leases = new Leases(conf.getLong("hbase.hregionserver.lease.period",
           3 * 60 * 1000), threadWakeFrequency);

-      // Server
-
-      this.server = RPC.getServer(this, address.getBindAddress().toString(),
-          address.getPort(), conf.getInt("hbase.hregionserver.handler.count", 10), false, conf);
       this.server.start();

     } catch(IOException e) {
       this.stopRequested = true;
       throw e;
     }

+    LOG.info("HRegionServer started at: " + address.toString());
   }

   /**
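Note the ordering change in the constructor: the RPC server is now created before the log paths or the master proxy are derived, and this.address is reset from server.getListenerAddress(). That is what makes the new 'localhost:0' default usable, since port 0 asks the OS for any free port and the advertised address is only known after the bind. The same idea with a plain socket (a sketch of the pattern only, not the Hadoop RPC API):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws IOException {
        ServerSocket server = new ServerSocket();
        server.bind(new InetSocketAddress("localhost", 0));  // 0 = OS picks a free port
        // only after binding do we know which port to advertise to the master
        System.out.println("bound to localhost:" + server.getLocalPort());
        server.close();
      }
    }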
@@ -334,7 +386,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * processing to cease.
    */
   public void stop() throws IOException {
-    if (!stopRequested) {
+    if(! stopRequested) {
       stopRequested = true;

       closeAllRegions();
@@ -342,11 +394,17 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       fs.close();
       server.stop();
     }
+    LOG.info("stopping server at: " + address.toString());
   }

   /** Call join to wait for all the threads to finish */
   public void join() {
+    try {
+      this.workerThread.join();
+
+    } catch(InterruptedException iex) {
+    }
     try {
       this.logRollerThread.join();

@@ -366,7 +424,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

     } catch(InterruptedException iex) {
     }
-
+    LOG.info("server stopped at: " + address.toString());
   }

   /**
@@ -375,7 +433,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * load/unload instructions.
    */
   public void run() {
-    while(!stopRequested) {
+    while(! stopRequested) {
       HServerInfo info = new HServerInfo(address, rand.nextLong());
       long lastMsg = 0;
       long waitTime;

@@ -388,18 +446,20 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       } catch(IOException e) {
         waitTime = msgInterval - (System.currentTimeMillis() - lastMsg);

-        try {
-          Thread.sleep(waitTime);
-        } catch(InterruptedException iex) {
+        if(waitTime > 0) {
+          try {
+            Thread.sleep(waitTime);
+
+          } catch(InterruptedException iex) {
+          }
         }
         continue;
       }

       // Now ask the master what it wants us to do and tell it what we have done.

-      while(!stopRequested) {
-        if ((System.currentTimeMillis() - lastMsg) >= msgInterval) {
+      while(! stopRequested) {
+        if((System.currentTimeMillis() - lastMsg) >= msgInterval) {

           HMsg outboundArray[] = null;
           synchronized(outboundMsgs) {
@@ -411,10 +471,33 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
             HMsg msgs[] = hbaseMaster.regionServerReport(info, outboundArray);
             lastMsg = System.currentTimeMillis();

-            // Process the HMaster's instruction stream
+            // Queue up the HMaster's instruction stream for processing

-            if (!processMessages(msgs)) {
-              break;
+            synchronized(toDo) {
+              boolean restartOrStop = false;
+              for(int i = 0; i < msgs.length; i++) {
+                switch(msgs[i].getMsg()) {
+
+                case HMsg.MSG_CALL_SERVER_STARTUP:
+                  closeAllRegions();
+                  restartOrStop = true;
+                  break;
+
+                case HMsg.MSG_REGIONSERVER_ALREADY_RUNNING:
+                  stop();
+                  restartOrStop = true;
+                  break;
+
+                default:
+                  toDo.add(msgs[i]);
+                }
+              }
+              if(toDo.size() > 0) {
+                toDo.notifyAll();
+              }
+              if(restartOrStop) {
+                break;
+              }
             }

           } catch(IOException e) {
@@ -424,55 +507,16 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

             waitTime = msgInterval - (System.currentTimeMillis() - lastMsg);

-            try {
-              Thread.sleep(waitTime);
-            } catch(InterruptedException iex) {
+            if(waitTime > 0) {
+              try {
+                Thread.sleep(waitTime);
+
+              } catch(InterruptedException iex) {
+              }
             }
           }

         }
       }
     }
   }

-  private boolean processMessages(HMsg[] msgs) throws IOException {
-    for(int i = 0; i < msgs.length; i++) {
-      switch(msgs[i].getMsg()) {
-
-      case HMsg.MSG_REGION_OPEN: // Open a region
-        openRegion(msgs[i].getRegionInfo());
-        break;
-
-      case HMsg.MSG_REGION_CLOSE: // Close a region
-        closeRegion(msgs[i].getRegionInfo(), true);
-        break;
-
-      case HMsg.MSG_REGION_MERGE: // Merge two regions
-        //TODO ???
-        throw new IOException("TODO: need to figure out merge");
-        //break;
-
-      case HMsg.MSG_CALL_SERVER_STARTUP: // Close regions, restart
-        closeAllRegions();
-        return false;
-
-      case HMsg.MSG_REGIONSERVER_ALREADY_RUNNING: // Go away
-        stop();
-        return false;
-
-      case HMsg.MSG_REGION_CLOSE_WITHOUT_REPORT: // Close a region, don't reply
-        closeRegion(msgs[i].getRegionInfo(), false);
-        break;
-
-      case HMsg.MSG_REGION_CLOSE_AND_DELETE:
-        closeAndDeleteRegion(msgs[i].getRegionInfo());
-        break;
-
-      default:
-        throw new IOException("Impossible state during msg processing. Instruction: " + msgs[i]);
-      }
-    }
-    return true;
-  }
-
   /** Add to the outbound message buffer */
   private void reportOpen(HRegion region) {
     synchronized(outboundMsgs) {
@@ -508,9 +552,68 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   // HMaster-given operations
   //////////////////////////////////////////////////////////////////////////////

+  private Vector<HMsg> toDo;
+  private Worker worker;
+  private Thread workerThread;
+  private class Worker implements Runnable {
+    public void run() {
+      while(!stopRequested) {
+        HMsg msg = null;
+        synchronized(toDo) {
+          while(toDo.size() == 0) {
+            try {
+              toDo.wait();
+
+            } catch(InterruptedException e) {
+            }
+          }
+          msg = toDo.remove(0);
+        }
+        try {
+          switch(msg.getMsg()) {
+
+          case HMsg.MSG_REGION_OPEN: // Open a region
+            openRegion(msg.getRegionInfo());
+            break;
+
+          case HMsg.MSG_REGION_CLOSE: // Close a region
+            closeRegion(msg.getRegionInfo(), true);
+            break;
+
+          case HMsg.MSG_REGION_MERGE: // Merge two regions
+            //TODO ???
+            throw new IOException("TODO: need to figure out merge");
+            //break;
+
+          case HMsg.MSG_CALL_SERVER_STARTUP: // Close regions, restart
+            closeAllRegions();
+            continue;
+
+          case HMsg.MSG_REGIONSERVER_ALREADY_RUNNING: // Go away
+            stop();
+            continue;
+
+          case HMsg.MSG_REGION_CLOSE_WITHOUT_REPORT: // Close a region, don't reply
+            closeRegion(msg.getRegionInfo(), false);
+            break;
+
+          case HMsg.MSG_REGION_CLOSE_AND_DELETE:
+            closeAndDeleteRegion(msg.getRegionInfo());
+            break;
+
+          default:
+            throw new IOException("Impossible state during msg processing. Instruction: " + msg);
+          }
+        } catch(IOException e) {
+          e.printStackTrace();
+        }
+      }
+    }
+  }
+
   private void openRegion(HRegionInfo regionInfo) throws IOException {

-    locking.obtainWriteLock();
+    this.locker.writeLock().lock();
     try {
       HRegion region = new HRegion(regionDir, log, fs, conf, regionInfo, null, oldlogfile);
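The Worker added here is the consumer half of a classic monitor-based hand-off: the report loop above queues HMsgs into toDo and calls notifyAll(), while Worker.run() blocks in wait() until something arrives. A minimal self-contained sketch of the same hand-off (String stands in for HMsg; everything else is illustrative):

    import java.util.Vector;

    public class ToDoQueueSketch {
      private final Vector<String> toDo = new Vector<String>();

      public void enqueue(String msg) {      // producer: the master-report loop
        synchronized (toDo) {
          toDo.add(msg);
          toDo.notifyAll();                  // wake the worker blocked in wait()
        }
      }

      public String dequeue() throws InterruptedException {  // consumer: Worker.run
        synchronized (toDo) {
          while (toDo.size() == 0) {         // a loop, not an if: guards against spurious wakeups
            toDo.wait();                     // releases the monitor while blocked
          }
          return toDo.remove(0);
        }
      }
    }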
|
@ -518,57 +621,57 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
|
||||||
reportOpen(region);
|
reportOpen(region);
|
||||||
|
|
||||||
} finally {
|
} finally {
|
||||||
locking.releaseWriteLock();
|
this.locker.writeLock().unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void closeRegion(HRegionInfo info, boolean reportWhenCompleted)
|
private void closeRegion(HRegionInfo info, boolean reportWhenCompleted)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
|
||||||
locking.obtainWriteLock();
|
this.locker.writeLock().lock();
|
||||||
try {
|
try {
|
||||||
HRegion region = regions.remove(info.regionName);
|
HRegion region = regions.remove(info.regionName);
|
||||||
|
|
||||||
if (region != null) {
|
if(region != null) {
|
||||||
region.close();
|
region.close();
|
||||||
|
|
||||||
if (reportWhenCompleted) {
|
if(reportWhenCompleted) {
|
||||||
reportClose(region);
|
reportClose(region);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
} finally {
|
} finally {
|
||||||
locking.releaseWriteLock();
|
this.locker.writeLock().unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void closeAndDeleteRegion(HRegionInfo info) throws IOException {
|
private void closeAndDeleteRegion(HRegionInfo info) throws IOException {
|
||||||
|
|
||||||
locking.obtainWriteLock();
|
this.locker.writeLock().lock();
|
||||||
try {
|
try {
|
||||||
HRegion region = regions.remove(info.regionName);
|
HRegion region = regions.remove(info.regionName);
|
||||||
|
|
||||||
if (region != null) {
|
if(region != null) {
|
||||||
region.closeAndDelete();
|
region.closeAndDelete();
|
||||||
}
|
}
|
||||||
|
|
||||||
} finally {
|
} finally {
|
||||||
locking.releaseWriteLock();
|
this.locker.writeLock().unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Called either when the master tells us to restart or from stop() */
|
/** Called either when the master tells us to restart or from stop() */
|
||||||
private void closeAllRegions() throws IOException {
|
private void closeAllRegions() throws IOException {
|
||||||
locking.obtainWriteLock();
|
this.locker.writeLock().lock();
|
||||||
try {
|
try {
|
||||||
for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext();) {
|
for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext(); ) {
|
||||||
HRegion region = it.next();
|
HRegion region = it.next();
|
||||||
region.close();
|
region.close();
|
||||||
}
|
}
|
||||||
regions.clear();
|
regions.clear();
|
||||||
|
|
||||||
} finally {
|
} finally {
|
||||||
locking.releaseWriteLock();
|
this.locker.writeLock().unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -580,24 +683,24 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
|
||||||
*
|
*
|
||||||
* For now, we do not do merging. Splits are driven by the HRegionServer.
|
* For now, we do not do merging. Splits are driven by the HRegionServer.
|
||||||
****************************************************************************/
|
****************************************************************************/
|
||||||
/*
|
/*
|
||||||
private void mergeRegions(Text regionNameA, Text regionNameB) throws IOException {
|
private void mergeRegions(Text regionNameA, Text regionNameB) throws IOException {
|
||||||
locking.obtainWriteLock();
|
locking.obtainWriteLock();
|
||||||
try {
|
try {
|
||||||
HRegion srcA = regions.remove(regionNameA);
|
HRegion srcA = regions.remove(regionNameA);
|
||||||
HRegion srcB = regions.remove(regionNameB);
|
HRegion srcB = regions.remove(regionNameB);
|
||||||
HRegion newRegion = HRegion.closeAndMerge(srcA, srcB);
|
HRegion newRegion = HRegion.closeAndMerge(srcA, srcB);
|
||||||
regions.put(newRegion.getRegionName(), newRegion);
|
regions.put(newRegion.getRegionName(), newRegion);
|
||||||
|
|
||||||
reportClose(srcA);
|
reportClose(srcA);
|
||||||
reportClose(srcB);
|
reportClose(srcB);
|
||||||
reportOpen(newRegion);
|
reportOpen(newRegion);
|
||||||
|
|
||||||
} finally {
|
} finally {
|
||||||
locking.releaseWriteLock();
|
locking.releaseWriteLock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////////////
|
||||||
// HRegionInterface
|
// HRegionInterface
|
||||||
|
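Every conversion in this file follows the same discipline: the bespoke HLocking helper is replaced by java.util.concurrent's ReentrantReadWriteLock, with lock() before the guarded section and unlock() in a finally block so the lock is released on every exit path, including exceptions. A compact self-contained sketch of the pattern (String and Object stand in for Text and HRegion):

    import java.util.TreeMap;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class RegionMapSketch {
      private final ReadWriteLock locker = new ReentrantReadWriteLock();
      private final TreeMap<String, Object> regions = new TreeMap<String, Object>();

      public Object lookup(String regionName) {
        locker.readLock().lock();       // shared: many readers may hold this at once
        try {
          return regions.get(regionName);
        } finally {
          locker.readLock().unlock();   // finally guarantees release on any exit path
        }
      }

      public void removeRegion(String regionName) {
        locker.writeLock().lock();      // exclusive: blocks readers and other writers
        try {
          regions.remove(regionName);
        } finally {
          locker.writeLock().unlock();
        }
      }
    }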
@@ -606,32 +709,21 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Obtain a table descriptor for the given region */
   public HRegionInfo getRegionInfo(Text regionName) {
     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       return null;
     }
     return region.getRegionInfo();
   }

-  /** Start a scanner for a given HRegion. */
-  public HScannerInterface openScanner(Text regionName, Text[] cols,
-      Text firstRow) throws IOException {
-
-    HRegion r = getRegion(regionName);
-    if (r == null) {
-      throw new IOException("Not serving region " + regionName);
-    }
-    return r.getScanner(cols, firstRow);
-  }
-
   /** Get the indicated row/column */
   public BytesWritable get(Text regionName, Text row, Text column) throws IOException {
     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     byte results[] = region.get(row, column);
-    if (results != null) {
+    if(results != null) {
       return new BytesWritable(results);
     }
     return null;
@@ -639,18 +731,18 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

   /** Get multiple versions of the indicated row/col */
   public BytesWritable[] get(Text regionName, Text row, Text column,
       int numVersions) throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     byte results[][] = region.get(row, column, numVersions);
-    if (results != null) {
+    if(results != null) {
       BytesWritable realResults[] = new BytesWritable[results.length];
       for(int i = 0; i < realResults.length; i++) {
-        if (results[i] != null) {
+        if(results[i] != null) {
           realResults[i] = new BytesWritable(results[i]);
         }
       }
@@ -661,18 +753,18 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

   /** Get multiple timestamped versions of the indicated row/col */
   public BytesWritable[] get(Text regionName, Text row, Text column,
       long timestamp, int numVersions) throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     byte results[][] = region.get(row, column, timestamp, numVersions);
-    if (results != null) {
+    if(results != null) {
       BytesWritable realResults[] = new BytesWritable[results.length];
       for(int i = 0; i < realResults.length; i++) {
-        if (results[i] != null) {
+        if(results[i] != null) {
           realResults[i] = new BytesWritable(results[i]);
         }
       }
@@ -684,14 +776,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Get all the columns (along with their names) for a given row. */
   public LabelledData[] getRow(Text regionName, Text row) throws IOException {
     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     TreeMap<Text, byte[]> map = region.getFull(row);
     LabelledData result[] = new LabelledData[map.size()];
     int counter = 0;
-    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext(); ) {
       Text colname = it.next();
       byte val[] = map.get(colname);
       result[counter++] = new LabelledData(colname, val);
@@ -723,77 +815,77 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }

   public long startUpdate(Text regionName, long clientid, Text row)
       throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     long lockid = region.startUpdate(row);
     leases.createLease(new Text(String.valueOf(clientid)),
         new Text(String.valueOf(lockid)),
         new RegionListener(region, lockid));

     return lockid;
   }

   /** Add something to the HBase. */
   public void put(Text regionName, long clientid, long lockid, Text column,
       BytesWritable val) throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     leases.renewLease(new Text(String.valueOf(clientid)),
         new Text(String.valueOf(lockid)));

     region.put(lockid, column, val.get());
   }

   /** Remove a cell from the HBase. */
   public void delete(Text regionName, long clientid, long lockid, Text column)
       throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     leases.renewLease(new Text(String.valueOf(clientid)),
         new Text(String.valueOf(lockid)));

     region.delete(lockid, column);
   }

   /** Abandon the transaction */
   public void abort(Text regionName, long clientid, long lockid)
       throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     leases.cancelLease(new Text(String.valueOf(clientid)),
         new Text(String.valueOf(lockid)));

     region.abort(lockid);
   }

   /** Confirm the transaction */
   public void commit(Text regionName, long clientid, long lockid)
       throws IOException {

     HRegion region = getRegion(regionName);
-    if (region == null) {
+    if(region == null) {
       throw new IOException("Not serving region " + regionName);
     }

     leases.cancelLease(new Text(String.valueOf(clientid)),
         new Text(String.valueOf(lockid)));

     region.commit(lockid);
   }
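Taken together, these methods define the write path: startUpdate hands back a row lock id and opens a lease keyed on (clientid, lockid), every put or delete renews that lease, and commit or abort cancels it to end the transaction. A hedged sketch of the expected calling sequence ('server' is an HRegionInterface proxy; the names and values are invented):

    long clientid = 42L;                                  // assumed client id
    long lockid = server.startUpdate(regionName, clientid, new Text("row1"));
    try {
      // each put/delete renews the server-side lease on (clientid, lockid)
      server.put(regionName, clientid, lockid, new Text("contents:a"),
          new BytesWritable("value".getBytes()));
      server.commit(regionName, clientid, lockid);        // cancels the lease, keeps the edits
    } catch (IOException e) {
      server.abort(regionName, clientid, lockid);         // cancels the lease, drops the edits
      throw e;
    }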
@@ -801,18 +893,131 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Don't let the client's lease expire just yet... */
   public void renewLease(long lockid, long clientid) throws IOException {
     leases.renewLease(new Text(String.valueOf(clientid)),
         new Text(String.valueOf(lockid)));
   }

   /** Private utility method for safely obtaining an HRegion handle. */
   private HRegion getRegion(Text regionName) {
-    locking.obtainReadLock();
+    this.locker.readLock().lock();
     try {
       return regions.get(regionName);

     } finally {
-      locking.releaseReadLock();
+      this.locker.readLock().unlock();
     }
   }

+  //////////////////////////////////////////////////////////////////////////////
+  // remote scanner interface
+  //////////////////////////////////////////////////////////////////////////////
+
+  private Map<Text, HScannerInterface> scanners;
+  private class ScannerListener extends LeaseListener {
+    private Text scannerName;
+
+    public ScannerListener(Text scannerName) {
+      this.scannerName = scannerName;
+    }
+
+    public void leaseExpired() {
+      HScannerInterface s = scanners.remove(scannerName);
+      if(s != null) {
+        try {
+          s.close();
+
+        } catch(IOException e) {
+          e.printStackTrace();
+        }
+      }
+    }
+  }
+
+  /** Start a scanner for a given HRegion. */
+  public long openScanner(Text regionName, Text[] cols, Text firstRow)
+      throws IOException {
+
+    HRegion r = getRegion(regionName);
+    if(r == null) {
+      throw new IOException("Not serving region " + regionName);
+    }
+
+    long scannerId = -1L;
+    try {
+      HScannerInterface s = r.getScanner(cols, firstRow);
+      scannerId = rand.nextLong();
+      Text scannerName = new Text(String.valueOf(scannerId));
+      scanners.put(scannerName, s);
+      leases.createLease(scannerName, scannerName, new ScannerListener(scannerName));
+
+    } catch(IOException e) {
+      e.printStackTrace();
+      throw e;
+    }
+    return scannerId;
+  }
+
+  public LabelledData[] next(long scannerId, HStoreKey key) throws IOException {
+
+    Text scannerName = new Text(String.valueOf(scannerId));
+    HScannerInterface s = scanners.get(scannerName);
+    if(s == null) {
+      throw new IOException("unknown scanner");
+    }
+    leases.renewLease(scannerName, scannerName);
+    TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    ArrayList<LabelledData> values = new ArrayList<LabelledData>();
+    if(s.next(key, results)) {
+      for(Iterator<Map.Entry<Text, byte[]>> it = results.entrySet().iterator();
+          it.hasNext(); ) {
+        Map.Entry<Text, byte[]> e = it.next();
+        values.add(new LabelledData(e.getKey(), e.getValue()));
+      }
+    }
+    return values.toArray(new LabelledData[values.size()]);
+  }
+
+  public void close(long scannerId) throws IOException {
+    Text scannerName = new Text(String.valueOf(scannerId));
+    HScannerInterface s = scanners.remove(scannerName);
+    if(s == null) {
+      throw new IOException("unknown scanner");
+    }
+    try {
+      s.close();
+
+    } catch(IOException ex) {
+      ex.printStackTrace();
+    }
+    leases.cancelLease(scannerName, scannerName);
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Main program
+  //////////////////////////////////////////////////////////////////////////////
+
+  private static void printUsage() {
+    System.err.println("Usage: java " +
+        "org.apache.hbase.HRegionServer [--bind=hostname:port]");
+  }
+
+  public static void main(String [] args) throws IOException {
+    Configuration conf = new HBaseConfiguration();
+
+    // Process command-line args. TODO: Better cmd-line processing
+    // (but hopefully something not as painful as cli options).
+    for (String cmd: args) {
+      if (cmd.equals("-h") || cmd.startsWith("--h")) {
+        printUsage();
+        return;
+      }
+
+      final String addressArgKey = "--bind=";
+      if (cmd.startsWith(addressArgKey)) {
+        conf.set(REGIONSERVER_ADDRESS,
+            cmd.substring(addressArgKey.length()));
+      }
+    }
+
+    new HRegionServer(conf);
+  }
 }
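Based on the printUsage text above, starting a region server on an explicit port would look like this (the host and port are example values; with no --bind flag the REGIONSERVER_ADDRESS default applies):

    java org.apache.hbase.HRegionServer --bind=localhost:60010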
@@ -33,9 +33,15 @@ public class HServerAddress implements Writable {
     this.stringValue = null;
   }

+  public HServerAddress(InetSocketAddress address) {
+    this.address = address;
+    this.stringValue = new String(address.getAddress().getHostAddress()
+        + ":" + address.getPort());
+  }
+
   public HServerAddress(String hostAndPort) {
     int colonIndex = hostAndPort.indexOf(':');
-    if (colonIndex < 0) {
+    if(colonIndex < 0) {
       throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
     }
     String host = hostAndPort.substring(0, colonIndex);
@@ -80,7 +86,7 @@ public class HServerAddress implements Writable {
     String bindAddress = in.readUTF();
     int port = in.readInt();

-    if (bindAddress == null || bindAddress.length() == 0) {
+    if(bindAddress == null || bindAddress.length() == 0) {
       address = null;
       stringValue = null;

@@ -91,7 +97,7 @@ public class HServerAddress implements Writable {
   }

   public void write(DataOutput out) throws IOException {
-    if (address == null) {
+    if(address == null) {
       out.writeUTF("");
       out.writeInt(0);

@@ -15,14 +15,26 @@
  */
 package org.apache.hadoop.hbase;

+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.Vector;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.*;
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.fs.*;
-
-import java.io.*;
-import java.util.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;

 /*******************************************************************************
  * HStore maintains a bunch of data files. It is responsible for maintaining
@@ -53,7 +65,7 @@ public class HStore {
   Integer compactLock = new Integer(0);
   Integer flushLock = new Integer(0);

-  HLocking locking = new HLocking();
+  ReadWriteLock locker = new ReentrantReadWriteLock();

   TreeMap<Long, MapFile.Reader> maps = new TreeMap<Long, MapFile.Reader>();
   TreeMap<Long, HStoreFile> mapFiles = new TreeMap<Long, HStoreFile>();
@@ -88,7 +100,7 @@ public class HStore {
    * will be deleted (by whoever has instantiated the HStore).
    */
   public HStore(Path dir, Text regionName, Text colFamily, int maxVersions,
       FileSystem fs, Path reconstructionLog, Configuration conf) throws IOException {

     this.dir = dir;
     this.regionName = regionName;
@@ -110,7 +122,7 @@ public class HStore {

     this.compactdir = new Path(dir, COMPACTION_DIR);
     Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
-    if (fs.exists(curCompactStore)) {
+    if(fs.exists(curCompactStore)) {
       processReadyCompaction();
       fs.delete(curCompactStore);
     }
@@ -123,7 +135,7 @@ public class HStore {
     Vector<HStoreFile> hstoreFiles
       = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs);

-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       mapFiles.put(hsf.loadInfo(fs), hsf);
     }
@@ -138,11 +150,11 @@ public class HStore {
     // contain any updates also contained in the log.

     long maxSeqID = -1;
-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       long seqid = hsf.loadInfo(fs);
-      if (seqid > 0) {
-        if (seqid > maxSeqID) {
+      if(seqid > 0) {
+        if(seqid > maxSeqID) {
           maxSeqID = seqid;
         }
       }
@@ -157,7 +169,7 @@ public class HStore {

     LOG.debug("reading reconstructionLog");

-    if (reconstructionLog != null && fs.exists(reconstructionLog)) {
+    if(reconstructionLog != null && fs.exists(reconstructionLog)) {
       long maxSeqIdInLog = -1;
       TreeMap<HStoreKey, BytesWritable> reconstructedCache
         = new TreeMap<HStoreKey, BytesWritable>();
@@ -170,18 +182,18 @@ public class HStore {
         HLogEdit val = new HLogEdit();
         while(login.next(key, val)) {
           maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
-          if (key.getLogSeqNum() <= maxSeqID) {
+          if(key.getLogSeqNum() <= maxSeqID) {
             continue;
           }
           reconstructedCache.put(new HStoreKey(key.getRow(), val.getColumn(),
               val.getTimestamp()), val.getVal());
         }

       } finally {
         login.close();
       }

-      if (reconstructedCache.size() > 0) {
+      if(reconstructedCache.size() > 0) {

         // We create a "virtual flush" at maxSeqIdInLog+1.

@@ -195,7 +207,7 @@ public class HStore {
     // should be "timeless"; that is, it should not have an associated seq-ID,
     // because all log messages have been reflected in the TreeMaps at this point.

-    if (mapFiles.size() >= 1) {
+    if(mapFiles.size() >= 1) {
       compactHelper(true);
     }

@@ -204,7 +216,7 @@ public class HStore {

     LOG.debug("starting map readers");

-    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext();) {
+    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext(); ) {
       Long key = it.next().longValue();
|
||||||
HStoreFile hsf = mapFiles.get(key);
|
HStoreFile hsf = mapFiles.get(key);
|
||||||
|
|
||||||
|
@ -218,11 +230,11 @@ public class HStore {
|
||||||
|
|
||||||
/** Turn off all the MapFile readers */
|
/** Turn off all the MapFile readers */
|
||||||
public void close() throws IOException {
|
public void close() throws IOException {
|
||||||
locking.obtainWriteLock();
|
this.locker.writeLock().lock();
|
||||||
LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
|
LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext();) {
|
for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
|
||||||
MapFile.Reader map = it.next();
|
MapFile.Reader map = it.next();
|
||||||
map.close();
|
map.close();
|
||||||
}
|
}
|
||||||
|
@ -232,7 +244,7 @@ public class HStore {
|
||||||
LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily);
|
LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily);
|
||||||
|
|
||||||
} finally {
|
} finally {
|
||||||
locking.releaseWriteLock();
|
this.locker.writeLock().unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
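The hunks above convert HStore from the project's custom HLocking helper to java.util.concurrent.locks.ReentrantReadWriteLock. A minimal sketch of the lock/unlock discipline the patch applies throughout this class (illustrative class and method names only; the ReadWriteLock API itself is the standard Java 5 one):

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class StoreSketch {
      private final ReadWriteLock locker = new ReentrantReadWriteLock();

      void mutateSharedMaps() {
        locker.writeLock().lock();     // exclusive: blocks all readers and writers
        try {
          // modify maps / mapFiles here
        } finally {
          locker.writeLock().unlock(); // release in finally, even on exception
        }
      }

      void readSharedMaps() {
        locker.readLock().lock();      // shared: concurrent readers allowed
        try {
          // read maps / mapFiles here
        } finally {
          locker.readLock().unlock();
        }
      }
    }

Unlike HLocking, the JDK lock is maintained upstream and its write lock is reentrant.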
@ -252,13 +264,13 @@ public class HStore {
   * Return the entire list of HStoreFiles currently used by the HStore.
   */
  public Vector<HStoreFile> flushCache(TreeMap<HStoreKey, BytesWritable> inputCache,
      long logCacheFlushId) throws IOException {
    return flushCacheHelper(inputCache, logCacheFlushId, true);
  }

  Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, BytesWritable> inputCache,
      long logCacheFlushId, boolean addToAvailableMaps) throws IOException {
    synchronized(flushLock) {
      LOG.debug("flushing HStore " + this.regionName + "/" + this.colFamily);

@ -270,12 +282,12 @@ public class HStore {
      Path mapfile = flushedFile.getMapFilePath();
      MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(),
        HStoreKey.class, BytesWritable.class);

      try {
-        for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext();) {
+        for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext(); ) {
          HStoreKey curkey = it.next();
-          if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          if(this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
            BytesWritable val = inputCache.get(curkey);
            out.append(curkey, val);
          }
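flushCacheHelper above dumps the sorted memcache into a brand-new MapFile. Worth noting: MapFile.Writer.append() requires keys to arrive in sorted order, which the TreeMap iteration guarantees. A condensed sketch of that contract, using the same pre-0.20 Writer constructor the patch itself uses (simplified: the real loop also filters entries down to this store's column family):

    MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(),
        HStoreKey.class, BytesWritable.class);
    try {
      // TreeMap iterates in ascending key order, exactly what append() needs;
      // out-of-order keys would make append() throw an IOException.
      for (Map.Entry<HStoreKey, BytesWritable> e : inputCache.entrySet()) {
        out.append(e.getKey(), e.getValue());
      }
    } finally {
      out.close();   // finishes both the data file and its index
    }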
@ -294,8 +306,8 @@ public class HStore {
      // C. Finally, make the new MapFile available.

-      if (addToAvailableMaps) {
-        locking.obtainWriteLock();
+      if(addToAvailableMaps) {
+        this.locker.writeLock().lock();

        try {
          maps.put(logCacheFlushId, new MapFile.Reader(fs, mapfile.toString(), conf));

@ -303,7 +315,7 @@ public class HStore {
          LOG.debug("HStore available for " + this.regionName + "/" + this.colFamily);

        } finally {
-          locking.releaseWriteLock();
+          this.locker.writeLock().unlock();
        }
      }
      return getAllMapFiles();

@ -312,7 +324,7 @@ public class HStore {
  public Vector<HStoreFile> getAllMapFiles() {
    Vector<HStoreFile> flushedFiles = new Vector<HStoreFile>();
-    for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
      HStoreFile hsf = it.next();
      flushedFiles.add(hsf);
    }

@ -355,22 +367,22 @@ public class HStore {
    // Grab a list of files to compact.

    Vector<HStoreFile> toCompactFiles = null;
-    locking.obtainWriteLock();
+    this.locker.writeLock().lock();
    try {
      toCompactFiles = new Vector<HStoreFile>(mapFiles.values());

    } finally {
-      locking.releaseWriteLock();
+      this.locker.writeLock().unlock();
    }

    // Compute the max-sequenceID seen in any of the to-be-compacted TreeMaps

    long maxSeenSeqID = -1;
-    for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
      HStoreFile hsf = it.next();
      long seqid = hsf.loadInfo(fs);
-      if (seqid > 0) {
-        if (seqid > maxSeenSeqID) {
+      if(seqid > 0) {
+        if(seqid > maxSeenSeqID) {
          maxSeenSeqID = seqid;
        }
      }

@ -380,11 +392,11 @@ public class HStore {
    HStoreFile compactedOutputFile
      = new HStoreFile(conf, compactdir, regionName, colFamily, -1);

-    if (toCompactFiles.size() == 1) {
+    if(toCompactFiles.size() == 1) {
      LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily);

      HStoreFile hsf = toCompactFiles.elementAt(0);
-      if (hsf.loadInfo(fs) == -1) {
+      if(hsf.loadInfo(fs) == -1) {
        return;
      }
    }

@ -392,8 +404,8 @@ public class HStore {
    // Step through them, writing to the brand-new TreeMap

    MapFile.Writer compactedOut = new MapFile.Writer(conf, fs,
      compactedOutputFile.getMapFilePath().toString(), HStoreKey.class,
      BytesWritable.class);

    try {

@ -414,7 +426,7 @@ public class HStore {
      BytesWritable[] vals = new BytesWritable[toCompactFiles.size()];
      boolean[] done = new boolean[toCompactFiles.size()];
      int pos = 0;
-      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
+      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
        HStoreFile hsf = it.next();
        readers[pos] = new MapFile.Reader(fs, hsf.getMapFilePath().toString(), conf);
        keys[pos] = new HStoreKey();

@ -431,8 +443,8 @@ public class HStore {
      int numDone = 0;
      for(int i = 0; i < readers.length; i++) {
        readers[i].reset();
-        done[i] = !readers[i].next(keys[i], vals[i]);
-        if (done[i]) {
+        done[i] = ! readers[i].next(keys[i], vals[i]);
+        if(done[i]) {
          numDone++;
        }
      }

@ -446,15 +458,15 @@ public class HStore {
      int smallestKey = -1;
      for(int i = 0; i < readers.length; i++) {
-        if (done[i]) {
+        if(done[i]) {
          continue;
        }

-        if (smallestKey < 0) {
+        if(smallestKey < 0) {
          smallestKey = i;

        } else {
-          if (keys[i].compareTo(keys[smallestKey]) < 0) {
+          if(keys[i].compareTo(keys[smallestKey]) < 0) {
            smallestKey = i;
          }
        }
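The loop above implements compaction as a k-way merge: on every pass it scans the per-file cursors for the smallest live key, copies that cell to the output, and advances only that reader. The same selection step in isolation (a hypothetical helper, not part of the patch):

    /** Index of the reader whose current key is smallest, or -1 if all are done. */
    static int pickSmallest(HStoreKey[] keys, boolean[] done) {
      int smallest = -1;
      for (int i = 0; i < keys.length; i++) {
        if (done[i]) {
          continue;                                   // reader i is exhausted
        }
        if (smallest < 0 || keys[i].compareTo(keys[smallest]) < 0) {
          smallest = i;                               // new minimum so far
        }
      }
      return smallest;
    }

With a handful of store files a linear scan per output cell is fine; a priority queue would cut the per-cell cost from O(k) to O(log k) if the file count grew large.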
@ -463,7 +475,7 @@ public class HStore {
      // Reflect the current key/val in the output

      HStoreKey sk = keys[smallestKey];
-      if (lastRow.equals(sk.getRow())
+      if(lastRow.equals(sk.getRow())
          && lastColumn.equals(sk.getColumn())) {
        timesSeen++;

@ -472,12 +484,12 @@ public class HStore {
        timesSeen = 1;
      }

-      if (timesSeen <= maxVersions) {
+      if(timesSeen <= maxVersions) {

        // Keep old versions until we have maxVersions worth.
        // Then just skip them.

-        if (sk.getRow().getLength() != 0
+        if(sk.getRow().getLength() != 0
            && sk.getColumn().getLength() != 0) {

          // Only write out objects which have a non-zero length key and value

@ -499,7 +511,7 @@ public class HStore {
      // Advance the smallest key.  If that reader's all finished, then
      // mark it as done.

-      if (!readers[smallestKey].next(keys[smallestKey], vals[smallestKey])) {
+      if(! readers[smallestKey].next(keys[smallestKey], vals[smallestKey])) {
        done[smallestKey] = true;
        readers[smallestKey].close();
        numDone++;

@ -516,7 +528,7 @@ public class HStore {
    // Now, write out an HSTORE_LOGINFOFILE for the brand-new TreeMap.

-    if ((!deleteSequenceInfo) && maxSeenSeqID >= 0) {
+    if((! deleteSequenceInfo) && maxSeenSeqID >= 0) {
      compactedOutputFile.writeInfo(fs, maxSeenSeqID);

    } else {

@ -529,7 +541,7 @@ public class HStore {
    DataOutputStream out = new DataOutputStream(fs.create(filesToReplace));
    try {
      out.writeInt(toCompactFiles.size());
-      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
+      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
        HStoreFile hsf = it.next();
        hsf.write(out);
      }

@ -583,11 +595,11 @@ public class HStore {
    // 1. Acquiring the write-lock

-    locking.obtainWriteLock();
+    this.locker.writeLock().lock();
    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
    try {
      Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
-      if (!fs.exists(doneFile)) {
+      if(! fs.exists(doneFile)) {

        // The last execution didn't finish the compaction, so there's nothing
        // we can do.  We'll just have to redo it. Abandon it and return.

@ -622,18 +634,18 @@ public class HStore {
      // 3. Unload all the replaced MapFiles.

      Iterator<HStoreFile> it2 = mapFiles.values().iterator();
-      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext();) {
+      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
        MapFile.Reader curReader = it.next();
        HStoreFile curMapFile = it2.next();
-        if (toCompactFiles.contains(curMapFile)) {
+        if(toCompactFiles.contains(curMapFile)) {
          curReader.close();
          it.remove();
        }
      }

-      for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
+      for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
        HStoreFile curMapFile = it.next();
-        if (toCompactFiles.contains(curMapFile)) {
+        if(toCompactFiles.contains(curMapFile)) {
          it.remove();
        }
      }

@ -645,7 +657,7 @@ public class HStore {
      // 4. Delete all the old files, no longer needed

-      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
+      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
        HStoreFile hsf = it.next();
        fs.delete(hsf.getMapFilePath());
        fs.delete(hsf.getInfoFilePath());

@ -683,13 +695,13 @@ public class HStore {
      mapFiles.put(orderVal, finalCompactedFile);
      maps.put(orderVal, new MapFile.Reader(fs,
        finalCompactedFile.getMapFilePath().toString(), conf));

    } finally {

      // 7. Releasing the write-lock

-      locking.releaseWriteLock();
+      this.locker.writeLock().unlock();
    }
  }

@ -705,7 +717,7 @@ public class HStore {
   * The returned object should map column names to byte arrays (byte[]).
   */
  public void getFull(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
-    locking.obtainReadLock();
+    this.locker.readLock().lock();
    try {
      MapFile.Reader[] maparray
        = maps.values().toArray(new MapFile.Reader[maps.size()]);

@ -720,12 +732,12 @@ public class HStore {
        do {
          Text readcol = readkey.getColumn();
-          if (results.get(readcol) == null
+          if(results.get(readcol) == null
              && key.matchesWithoutColumn(readkey)) {
            results.put(new Text(readcol), readval.get());
            readval = new BytesWritable();

-          } else if (key.getRow().compareTo(readkey.getRow()) > 0) {
+          } else if(key.getRow().compareTo(readkey.getRow()) > 0) {
            break;
          }

@ -734,7 +746,7 @@ public class HStore {
      }

    } finally {
-      locking.releaseReadLock();
+      this.locker.readLock().unlock();
    }
  }

@ -745,12 +757,12 @@ public class HStore {
   * If 'numVersions' is negative, the method returns all available versions.
   */
  public byte[][] get(HStoreKey key, int numVersions) throws IOException {
-    if (numVersions == 0) {
+    if(numVersions == 0) {
      throw new IllegalArgumentException("Must request at least one value.");
    }

    Vector<byte[]> results = new Vector<byte[]>();
-    locking.obtainReadLock();
+    this.locker.readLock().lock();
    try {
      MapFile.Reader[] maparray
        = maps.values().toArray(new MapFile.Reader[maps.size()]);

@ -763,12 +775,12 @@ public class HStore {
        map.reset();
        HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);

-        if (readkey.matchesRowCol(key)) {
+        if(readkey.matchesRowCol(key)) {
          results.add(readval.get());
          readval = new BytesWritable();

          while(map.next(readkey, readval) && readkey.matchesRowCol(key)) {
-            if (numVersions > 0 && (results.size() >= numVersions)) {
+            if(numVersions > 0 && (results.size() >= numVersions)) {
              break;

            } else {

@ -778,12 +790,12 @@ public class HStore {
            }
          }
        }
-        if (results.size() >= numVersions) {
+        if(results.size() >= numVersions) {
          break;
        }
      }

-      if (results.size() == 0) {
+      if(results.size() == 0) {
        return null;

      } else {

@ -791,7 +803,7 @@ public class HStore {
      }

    } finally {
-      locking.releaseReadLock();
+      this.locker.readLock().unlock();
    }
  }

@ -804,18 +816,23 @@ public class HStore {
   */
  public long getLargestFileSize(Text midKey) throws IOException {
    long maxSize = 0L;
+    if (this.mapFiles.size() <= 0) {
+      return maxSize;
+    }

    long mapIndex = 0L;

    // Iterate through all the MapFiles

    for(Iterator<Map.Entry<Long, HStoreFile>> it = mapFiles.entrySet().iterator();
-        it.hasNext();) {
+        it.hasNext(); ) {

      Map.Entry<Long, HStoreFile> e = it.next();
      HStoreFile curHSF = e.getValue();
      long size = fs.getLength(new Path(curHSF.getMapFilePath(), MapFile.DATA_FILE_NAME));

-      if (size > maxSize) {              // This is the largest one so far
+      if(size > maxSize) {              // This is the largest one so far
        maxSize = size;
        mapIndex = e.getKey();
      }

@ -850,7 +867,7 @@ public class HStore {
   * These should be closed after the user is done with them.
   */
  public HScannerInterface getScanner(long timestamp, Text targetCols[],
      Text firstRow) throws IOException {
    return new HStoreScanner(timestamp, targetCols, firstRow);
  }

@ -867,11 +884,11 @@ public class HStore {
    public HStoreScanner(long timestamp, Text targetCols[], Text firstRow) throws IOException {
      super(timestamp, targetCols);

-      locking.obtainReadLock();
+      locker.readLock().lock();
      try {
        this.readers = new MapFile.Reader[mapFiles.size()];
        int i = 0;
-        for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
+        for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
          HStoreFile curHSF = it.next();
          readers[i++] = new MapFile.Reader(fs, curHSF.getMapFilePath().toString(), conf);
        }

@ -885,14 +902,14 @@ public class HStore {
        keys[i] = new HStoreKey();
        vals[i] = new BytesWritable();

-        if (firstRow.getLength() != 0) {
-          if (findFirstRow(i, firstRow)) {
+        if(firstRow.getLength() != 0) {
+          if(findFirstRow(i, firstRow)) {
            continue;
          }
        }

        while(getNext(i)) {
-          if (columnMatch(i)) {
+          if(columnMatch(i)) {
            break;
          }
        }

@ -915,7 +932,7 @@ public class HStore {
      HStoreKey firstKey
        = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), vals[i]);

-      if (firstKey == null) {
+      if(firstKey == null) {

        // Didn't find it. Close the scanner and return TRUE

@ -935,7 +952,7 @@ public class HStore {
     * @return - true if there is more data available
     */
    boolean getNext(int i) throws IOException {
-      if (!readers[i].next(keys[i], vals[i])) {
+      if(! readers[i].next(keys[i], vals[i])) {
        closeSubScanner(i);
        return false;
      }

@ -945,7 +962,7 @@ public class HStore {
    /** Close down the indicated reader. */
    void closeSubScanner(int i) throws IOException {
      try {
-        if (readers[i] != null) {
+        if(readers[i] != null) {
          readers[i].close();
        }

@ -958,16 +975,16 @@ public class HStore {
    /** Shut it down! */
    public void close() throws IOException {
-      if (!scannerClosed) {
+      if(! scannerClosed) {
        try {
          for(int i = 0; i < readers.length; i++) {
-            if (readers[i] != null) {
+            if(readers[i] != null) {
              readers[i].close();
            }
          }

        } finally {
-          locking.releaseReadLock();
+          locker.readLock().unlock();
          scannerClosed = true;
        }
      }
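One design point in the scanner hunks just above: HStoreScanner acquires the store's read lock in its constructor and releases it only in close(), so an open scanner holds off any writer (flush installation, compaction bookkeeping) for its entire lifetime. Callers therefore need the usual try/finally shape; a hedged usage sketch (the local variable names are assumptions):

    HScannerInterface scanner = store.getScanner(timestamp, targetCols, firstRow);
    try {
      // iterate over the scanner here
    } finally {
      scanner.close();   // releases the HStore read lock taken in the constructor
    }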
@ -61,7 +61,7 @@ public class HStoreFile implements HConstants, WritableComparable {
  }

  public HStoreFile(Configuration conf, Path dir, Text regionName,
      Text colFamily, long fileId) {
    this.conf = conf;
    this.dir = dir;

@ -92,12 +92,12 @@ public class HStoreFile implements HConstants, WritableComparable {
  public Path getMapFilePath() {
    return new Path(HStoreFile.getMapDir(dir, regionName, colFamily),
      HSTORE_DATFILE_PREFIX + fileId);
  }

  public Path getInfoFilePath() {
    return new Path(HStoreFile.getInfoDir(dir, regionName, colFamily),
      HSTORE_INFOFILE_PREFIX + fileId);
  }

  // Static methods to build partial paths to internal directories.  Useful for

@ -105,17 +105,17 @@ public class HStoreFile implements HConstants, WritableComparable {
  public static Path getMapDir(Path dir, Text regionName, Text colFamily) {
    return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName,
      new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
  }

  public static Path getInfoDir(Path dir, Text regionName, Text colFamily) {
    return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName,
      new Path(colFamily.toString(), HSTORE_INFO_DIR)));
  }

  public static Path getHStoreDir(Path dir, Text regionName, Text colFamily) {
    return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName,
      colFamily.toString()));
  }

  public static Path getHRegionDir(Path dir, Text regionName) {

@ -127,7 +127,7 @@ public class HStoreFile implements HConstants, WritableComparable {
   * filesystem if the file already exists.
   */
  static HStoreFile obtainNewHStoreFile(Configuration conf, Path dir,
      Text regionName, Text colFamily, FileSystem fs) throws IOException {
    Path mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
    long fileId = Math.abs(rand.nextLong());

@ -149,7 +149,7 @@ public class HStoreFile implements HConstants, WritableComparable {
   * If only one exists, we'll delete it.
   */
  static Vector<HStoreFile> loadHStoreFiles(Configuration conf, Path dir,
      Text regionName, Text colFamily, FileSystem fs) throws IOException {
    Vector<HStoreFile> results = new Vector<HStoreFile>();
    Path mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);

@ -158,13 +158,13 @@ public class HStoreFile implements HConstants, WritableComparable {
    for(int i = 0; i < datfiles.length; i++) {
      String name = datfiles[i].getName();

-      if (name.startsWith(HSTORE_DATFILE_PREFIX)) {
+      if(name.startsWith(HSTORE_DATFILE_PREFIX)) {
        Long fileId = Long.parseLong(name.substring(HSTORE_DATFILE_PREFIX.length()));
        HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
        Path mapfile = curfile.getMapFilePath();
        Path infofile = curfile.getInfoFilePath();

-        if (fs.exists(infofile)) {
+        if(fs.exists(infofile)) {
          results.add(curfile);

        } else {

@ -178,12 +178,12 @@ public class HStoreFile implements HConstants, WritableComparable {
    for(int i = 0; i < infofiles.length; i++) {
      String name = infofiles[i].getName();

-      if (name.startsWith(HSTORE_INFOFILE_PREFIX)) {
+      if(name.startsWith(HSTORE_INFOFILE_PREFIX)) {
        long fileId = Long.parseLong(name.substring(HSTORE_INFOFILE_PREFIX.length()));
        HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
        Path mapfile = curfile.getMapFilePath();

-        if (!fs.exists(mapfile)) {
+        if(! fs.exists(mapfile)) {
          fs.delete(curfile.getInfoFilePath());
        }
      }

@ -200,18 +200,18 @@ public class HStoreFile implements HConstants, WritableComparable {
   * brand-new HRegions.
   */
  public void splitStoreFile(Text midKey, HStoreFile dstA, HStoreFile dstB,
      FileSystem fs, Configuration conf) throws IOException {

    // Copy the appropriate tuples to one MapFile or the other.

    MapFile.Reader in = new MapFile.Reader(fs, getMapFilePath().toString(), conf);
    try {
      MapFile.Writer outA = new MapFile.Writer(conf, fs,
        dstA.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);

      try {
        MapFile.Writer outB = new MapFile.Writer(conf, fs,
          dstB.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);

        try {
          HStoreKey readkey = new HStoreKey();

@ -220,7 +220,7 @@ public class HStoreFile implements HConstants, WritableComparable {
          while(in.next(readkey, readval)) {
            Text key = readkey.getRow();
-            if (key.compareTo(midKey) < 0) {
+            if(key.compareTo(midKey) < 0) {
              outA.append(readkey, readval);

            } else {

@ -252,15 +252,15 @@ public class HStoreFile implements HConstants, WritableComparable {
   * We are merging multiple regions into a single new one.
   */
  public void mergeStoreFiles(Vector<HStoreFile> srcFiles, FileSystem fs,
      Configuration conf) throws IOException {

    // Copy all the source MapFile tuples into this HSF's MapFile

    MapFile.Writer out = new MapFile.Writer(conf, fs, getMapFilePath().toString(),
      HStoreKey.class, BytesWritable.class);

    try {
-      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
+      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
        HStoreFile src = it.next();
        MapFile.Reader in = new MapFile.Reader(fs, src.getMapFilePath().toString(), conf);

@ -283,11 +283,11 @@ public class HStoreFile implements HConstants, WritableComparable {
    // Build a unified InfoFile from the source InfoFiles.

    long unifiedSeqId = -1;
-    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
+    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
      HStoreFile hsf = it.next();
      long curSeqId = hsf.loadInfo(fs);

-      if (curSeqId > unifiedSeqId) {
+      if(curSeqId > unifiedSeqId) {
        unifiedSeqId = curSeqId;
      }
    }

@ -301,7 +301,7 @@ public class HStoreFile implements HConstants, WritableComparable {
    try {
      byte flag = in.readByte();
-      if (flag == INFO_SEQ_NUM) {
+      if(flag == INFO_SEQ_NUM) {
        return in.readLong();

      } else {

@ -352,17 +352,17 @@ public class HStoreFile implements HConstants, WritableComparable {
  public int compareTo(Object o) {
    HStoreFile other = (HStoreFile) o;
    int result = this.dir.compareTo(other.dir);
-    if (result == 0) {
+    if(result == 0) {
      this.regionName.compareTo(other.regionName);
    }
-    if (result == 0) {
+    if(result == 0) {
      result = this.colFamily.compareTo(other.colFamily);
    }
-    if (result == 0) {
-      if (this.fileId < other.fileId) {
+    if(result == 0) {
+      if(this.fileId < other.fileId) {
        result = -1;

-      } else if (this.fileId > other.fileId) {
+      } else if(this.fileId > other.fileId) {
        result = 1;
      }
    }
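One apparent bug survives the reformatting in the compareTo() hunk above: the value of this.regionName.compareTo(other.regionName) is computed and thrown away rather than assigned to result, so two store files from different regions under the same directory can compare equal on that field. The presumably intended chaining would look like this (a corrected sketch, not what the patch commits):

    public int compareTo(Object o) {
      HStoreFile other = (HStoreFile) o;
      int result = this.dir.compareTo(other.dir);
      if (result == 0) {
        result = this.regionName.compareTo(other.regionName);  // keep the result
      }
      if (result == 0) {
        result = this.colFamily.compareTo(other.colFamily);
      }
      if (result == 0) {
        if (this.fileId < other.fileId) {
          result = -1;
        } else if (this.fileId > other.fileId) {
          result = 1;
        }
      }
      return result;
    }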
@ -29,7 +29,7 @@ public class HStoreKey implements WritableComparable {
  public static Text extractFamily(Text col) throws IOException {
    String column = col.toString();
    int colpos = column.indexOf(":");
-    if (colpos < 0) {
+    if(colpos < 0) {
      throw new IllegalArgumentException("Illegal column name has no family indicator: " + column);
    }
    return new Text(column.substring(0, colpos));

@ -93,8 +93,13 @@ public class HStoreKey implements WritableComparable {
    return timestamp;
  }

+  /**
+   * @param other Key to compare against. Compares row and column.
+   * @return True if same row and column.
+   * @see {@link #matchesWithoutColumn(HStoreKey)}
+   */
  public boolean matchesRowCol(HStoreKey other) {
-    if (this.row.compareTo(other.row) == 0 &&
+    if(this.row.compareTo(other.row) == 0 &&
        this.column.compareTo(other.column) == 0) {
      return true;

@ -103,8 +108,15 @@ public class HStoreKey implements WritableComparable {
    }
  }

+  /**
+   * @param other Key to compare against. Compares row and
+   * timestamp.
+   * @return True if same row and timestamp is greater than
+   * <code>other</code>
+   * @see {@link #matchesRowCol(HStoreKey)}
+   */
  public boolean matchesWithoutColumn(HStoreKey other) {
-    if ((this.row.compareTo(other.row) == 0) &&
+    if((this.row.compareTo(other.row) == 0) &&
        (this.timestamp >= other.getTimestamp())) {
      return true;

@ -124,14 +136,14 @@ public class HStoreKey implements WritableComparable {
  public int compareTo(Object o) {
    HStoreKey other = (HStoreKey) o;
    int result = this.row.compareTo(other.row);
-    if (result == 0) {
+    if(result == 0) {
      result = this.column.compareTo(other.column);

-      if (result == 0) {
-        if (this.timestamp < other.timestamp) {
+      if(result == 0) {
+        if(this.timestamp < other.timestamp) {
          result = 1;

-        } else if (this.timestamp > other.timestamp) {
+        } else if(this.timestamp > other.timestamp) {
          result = -1;
        }
      }
    }
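Note the timestamp clause in the compareTo() hunk above: rows and columns sort ascending, but a larger timestamp compares as smaller, so the newest version of a cell always sorts first in a MapFile and scans meet fresh data before stale data. A small illustration (the three-argument constructor is the one used elsewhere in this patch):

    HStoreKey older = new HStoreKey(new Text("row1"), new Text("info:a"), 100L);
    HStoreKey newer = new HStoreKey(new Text("row1"), new Text("info:a"), 200L);

    // Inverted timestamp ordering: the newer edit sorts before the older one.
    assert newer.compareTo(older) < 0;
    assert older.compareTo(newer) > 0;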
@ -54,7 +54,7 @@ public class HTableDescriptor implements WritableComparable {
  /** Do we contain a given column? */
  public boolean hasFamily(Text family) {
-    if (families.contains(family)) {
+    if(families.contains(family)) {
      return true;

    } else {

@ -75,7 +75,7 @@ public class HTableDescriptor implements WritableComparable {
    name.write(out);
    out.writeInt(maxVersions);
    out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
      it.next().write(out);
    }
  }

@ -99,21 +99,21 @@ public class HTableDescriptor implements WritableComparable {
  public int compareTo(Object o) {
    HTableDescriptor htd = (HTableDescriptor) o;
    int result = name.compareTo(htd.name);
-    if (result == 0) {
+    if(result == 0) {
      result = maxVersions - htd.maxVersions;
    }

-    if (result == 0) {
+    if(result == 0) {
      result = families.size() - htd.families.size();
    }

-    if (result == 0) {
+    if(result == 0) {
      Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext();) {
+      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
        Text family1 = it.next();
        Text family2 = it2.next();
        result = family1.compareTo(family2);
-        if (result != 0) {
+        if(result != 0) {
          return result;
        }
      }

@ -32,7 +32,7 @@ public class LabelledData implements Writable {
  }

  public LabelledData(Text label, byte[] data) {
-    this.label.set(label);
+    this.label = new Text(label);
    this.data = new BytesWritable(data);
  }

@ -40,7 +40,7 @@ public class LabelledData implements Writable {
    return label;
  }

-  public BytesWritable getDat() {
+  public BytesWritable getData() {
    return data;
  }
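The LabelledData constructor fix above swaps this.label.set(label) for this.label = new Text(label). Allocating a fresh Text avoids a NullPointerException in case the field has no initializer, and either way gives the object its own copy instead of aliasing the caller's bytes. The same defensive-copy idiom in isolation (a sketch; the class name is hypothetical):

    import org.apache.hadoop.io.Text;

    class Holder {
      private Text label;                  // assume no field initializer

      Holder(Text label) {
        // this.label.set(label) would NPE on a null field; copying into a
        // new Text is safe and also insulates us from later caller mutation.
        this.label = new Text(label);
      }
    }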
@ -77,7 +77,7 @@ public class Leases {
    synchronized(sortedLeases) {
      Lease lease = new Lease(holderId, resourceId, listener);
      Text leaseId = lease.getLeaseId();
-      if (leases.get(leaseId) != null) {
+      if(leases.get(leaseId) != null) {
        throw new IOException("Impossible state for createLease(): Lease for holderId " + holderId + " and resourceId " + resourceId + " is still held.");
      }
      leases.put(leaseId, lease);

@ -92,7 +92,7 @@ public class Leases {
    synchronized(sortedLeases) {
      Text leaseId = createLeaseId(holderId, resourceId);
      Lease lease = leases.get(leaseId);
-      if (lease == null) {
+      if(lease == null) {

        // It's possible that someone tries to renew the lease, but
        // it just expired a moment ago.  So fail.

@ -113,7 +113,7 @@ public class Leases {
    synchronized(sortedLeases) {
      Text leaseId = createLeaseId(holderId, resourceId);
      Lease lease = leases.get(leaseId);
-      if (lease == null) {
+      if(lease == null) {

        // It's possible that someone tries to renew the lease, but
        // it just expired a moment ago.  So fail.

@ -137,9 +137,9 @@ public class Leases {
    synchronized(sortedLeases) {
      Lease top;
      while((sortedLeases.size() > 0)
          && ((top = sortedLeases.first()) != null)) {

-        if (top.shouldExpire()) {
+        if(top.shouldExpire()) {
          leases.remove(top.getLeaseId());
          sortedLeases.remove(top);

@ -205,10 +205,10 @@ public class Leases {
    public int compareTo(Object o) {
      Lease other = (Lease) o;
-      if (this.lastUpdate < other.lastUpdate) {
+      if(this.lastUpdate < other.lastUpdate) {
        return -1;

-      } else if (this.lastUpdate > other.lastUpdate) {
+      } else if(this.lastUpdate > other.lastUpdate) {
        return 1;

      } else {
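The Leases hunks above follow a standard expiry-monitor shape: every lease lives both in a map for direct lookup and in a set sorted by last-renewal time, and a sweeper keeps expiring the head of the sorted set until the head is still live. Condensed from the code above (the break branch is implied by the surrounding, elided context):

    synchronized (sortedLeases) {
      Lease top;
      while (sortedLeases.size() > 0
          && (top = sortedLeases.first()) != null) {
        if (!top.shouldExpire()) {
          break;            // oldest lease is still live, so all newer ones are too
        }
        leases.remove(top.getLeaseId());
        sortedLeases.remove(top);
      }
    }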
@ -0,0 +1,28 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;

public class LockException extends IOException {
  public LockException() {
    super();
  }

  public LockException(String s) {
    super(s);
  }
}

@ -29,27 +29,27 @@ public class Environment {
    String value = null;

    value = System.getenv("DEBUGGING");
-    if (value != null && value.equalsIgnoreCase("TRUE")) {
+    if(value != null && value.equalsIgnoreCase("TRUE")) {
      debugging = true;
    }

    value = System.getenv("LOGGING_LEVEL");
-    if (value != null && value.length() != 0) {
-      if (value.equalsIgnoreCase("ALL")) {
+    if(value != null && value.length() != 0) {
+      if(value.equalsIgnoreCase("ALL")) {
        logLevel = Level.ALL;
-      } else if (value.equalsIgnoreCase("DEBUG")) {
+      } else if(value.equalsIgnoreCase("DEBUG")) {
        logLevel = Level.DEBUG;
-      } else if (value.equalsIgnoreCase("ERROR")) {
+      } else if(value.equalsIgnoreCase("ERROR")) {
        logLevel = Level.ERROR;
-      } else if (value.equalsIgnoreCase("FATAL")) {
+      } else if(value.equalsIgnoreCase("FATAL")) {
        logLevel = Level.FATAL;
-      } else if (value.equalsIgnoreCase("INFO")) {
+      } else if(value.equalsIgnoreCase("INFO")) {
        logLevel = Level.INFO;
-      } else if (value.equalsIgnoreCase("OFF")) {
+      } else if(value.equalsIgnoreCase("OFF")) {
        logLevel = Level.OFF;
-      } else if (value.equalsIgnoreCase("TRACE")) {
+      } else if(value.equalsIgnoreCase("TRACE")) {
        logLevel = Level.TRACE;
-      } else if (value.equalsIgnoreCase("WARN")) {
+      } else if(value.equalsIgnoreCase("WARN")) {
        logLevel = Level.WARN;
      }
    }
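Environment above maps the LOGGING_LEVEL variable onto log4j levels with an if/else-if ladder. For comparison only, log4j 1.x ships an equivalent case-insensitive lookup, Level.toLevel(String, Level), which falls back to a supplied default when the name is unrecognized; the whole ladder collapses to:

    import org.apache.log4j.Level;

    String value = System.getenv("LOGGING_LEVEL");
    // Unknown or null names yield the default instead of one of ALL..WARN.
    Level logLevel = Level.toLevel(value, Level.INFO);

This is an alternative, not what the patch does; the explicit ladder does keep the accepted names obvious at a glance.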
@ -0,0 +1,298 @@
/**
 * Copyright 2006 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * This class creates a single process HBase cluster for junit testing.
 * One thread is created for each server.
 */
public class MiniHBaseCluster implements HConstants {
  private Configuration conf;
  private MiniDFSCluster cluster;
  private FileSystem fs;
  private Path parentdir;
  private HMasterRunner master;
  private Thread masterThread;
  private HRegionServerRunner[] regionServers;
  private Thread[] regionThreads;

  public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
    this.conf = conf;

    try {
      try {
        if(System.getProperty("test.build.data") == null) {
          File testDir = new File(new File("").getAbsolutePath(),
              "build/contrib/hbase/test");

          String dir = testDir.getAbsolutePath();
          System.out.println(dir);
          System.setProperty("test.build.data", dir);
        }

        // To run using configured filesystem, comment out this
        // line below that starts up the MiniDFSCluster.
        this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
        this.fs = FileSystem.get(conf);
        this.parentdir =
          new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR));
        fs.mkdirs(parentdir);

      } catch(Throwable e) {
        System.err.println("Mini DFS cluster failed to start");
        e.printStackTrace();
        throw e;
      }

      if(this.conf.get(MASTER_ADDRESS) == null) {
        this.conf.set(MASTER_ADDRESS, "localhost:0");
      }

      // Create the master

      this.master = new HMasterRunner();
      this.masterThread = new Thread(master, "HMaster");

      // Start up the master

      masterThread.start();
      while(! master.isCrashed() && ! master.isInitialized()) {
        try {
          System.err.println("Waiting for HMaster to initialize...");
          Thread.sleep(1000);

        } catch(InterruptedException e) {
        }
        if(master.isCrashed()) {
          throw new RuntimeException("HMaster crashed");
        }
      }

      // Set the master's port for the HRegionServers

      this.conf.set(MASTER_ADDRESS, master.getHMasterAddress().toString());

      // Start the HRegionServers

      if(this.conf.get(REGIONSERVER_ADDRESS) == null) {
        this.conf.set(REGIONSERVER_ADDRESS, "localhost:0");
      }

      startRegionServers(this.conf, nRegionNodes);

      // Wait for things to get started

      while(! master.isCrashed() && ! master.isUp()) {
        try {
          System.err.println("Waiting for Mini HBase cluster to start...");
          Thread.sleep(1000);

        } catch(InterruptedException e) {
        }
        if(master.isCrashed()) {
          throw new RuntimeException("HMaster crashed");
        }
      }

    } catch(Throwable e) {

      // Delete all DFS files

      deleteFile(new File(System.getProperty("test.build.data"), "dfs"));

      throw new RuntimeException("Mini HBase cluster did not start");
    }
  }

  private void startRegionServers(Configuration conf, int nRegionNodes) {
    this.regionServers = new HRegionServerRunner[nRegionNodes];
    this.regionThreads = new Thread[nRegionNodes];

    for(int i = 0; i < nRegionNodes; i++) {
      regionServers[i] = new HRegionServerRunner(conf);
      regionThreads[i] = new Thread(regionServers[i], "HRegionServer-" + i);
      regionThreads[i].start();
    }
  }

  /**
   * Returns the rpc address actually used by the master server, because the
   * supplied port is not necessarily the actual port used.
   */
  public HServerAddress getHMasterAddress() {
    return master.getHMasterAddress();
  }

  /** Shut down the HBase cluster */
  public void shutdown() {
    System.out.println("Shutting down the HBase Cluster");
    for(int i = 0; i < regionServers.length; i++) {
      regionServers[i].shutdown();
    }
    master.shutdown();

    for(int i = 0; i < regionServers.length; i++) {
      try {
        regionThreads[i].join();

      } catch(InterruptedException e) {
      }
    }
    try {
      masterThread.join();

    } catch(InterruptedException e) {
    }

    System.out.println("Shutting down Mini DFS cluster");
    if (cluster != null) {
      cluster.shutdown();
    }

    // Delete all DFS files

    deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
  }

  private void deleteFile(File f) {
    if(f.isDirectory()) {
      File[] children = f.listFiles();
      for(int i = 0; i < children.length; i++) {
        deleteFile(children[i]);
      }
    }
    f.delete();
  }

  private class HMasterRunner implements Runnable {
    private HMaster master = null;
    private volatile boolean isInitialized = false;
    private boolean isCrashed = false;
    private boolean isRunning = true;

    public HServerAddress getHMasterAddress() {
      return master.getMasterAddress();
    }

    public synchronized boolean isInitialized() {
      return isInitialized;
    }

    public synchronized boolean isCrashed() {
      return isCrashed;
    }

    public boolean isUp() {
      if(master == null) {
        return false;
|
}
|
||||||
|
synchronized(this) {
|
||||||
|
return isInitialized;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Create the HMaster and run it */
|
||||||
|
public void run() {
|
||||||
|
try {
|
||||||
|
synchronized(this) {
|
||||||
|
if(isRunning) {
|
||||||
|
master = new HMaster(conf);
|
||||||
|
}
|
||||||
|
isInitialized = true;
|
||||||
|
}
|
||||||
|
} catch(Throwable e) {
|
||||||
|
shutdown();
|
||||||
|
System.err.println("HMaster crashed:");
|
||||||
|
e.printStackTrace();
|
||||||
|
synchronized(this) {
|
||||||
|
isCrashed = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Shut down the HMaster and wait for it to finish */
|
||||||
|
public synchronized void shutdown() {
|
||||||
|
isRunning = false;
|
||||||
|
if(master != null) {
|
||||||
|
try {
|
||||||
|
master.stop();
|
||||||
|
|
||||||
|
} catch(IOException e) {
|
||||||
|
System.err.println("Master crashed during stop");
|
||||||
|
e.printStackTrace();
|
||||||
|
|
||||||
|
} finally {
|
||||||
|
master.join();
|
||||||
|
master = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private class HRegionServerRunner implements Runnable {
|
||||||
|
private HRegionServer server = null;
|
||||||
|
private boolean isRunning = true;
|
||||||
|
private Configuration conf;
|
||||||
|
|
||||||
|
public HRegionServerRunner(Configuration conf) {
|
||||||
|
this.conf = conf;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Start up the HRegionServer */
|
||||||
|
public void run() {
|
||||||
|
try {
|
||||||
|
synchronized(this) {
|
||||||
|
if(isRunning) {
|
||||||
|
server = new HRegionServer(conf);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
server.run();
|
||||||
|
|
||||||
|
} catch(Throwable e) {
|
||||||
|
shutdown();
|
||||||
|
System.err.println("HRegionServer crashed:");
|
||||||
|
e.printStackTrace();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Shut down the HRegionServer */
|
||||||
|
public synchronized void shutdown() {
|
||||||
|
isRunning = false;
|
||||||
|
if(server != null) {
|
||||||
|
try {
|
||||||
|
server.stop();
|
||||||
|
|
||||||
|
} catch(IOException e) {
|
||||||
|
System.err.println("HRegionServer crashed during stop");
|
||||||
|
e.printStackTrace();
|
||||||
|
|
||||||
|
} finally {
|
||||||
|
server.join();
|
||||||
|
server = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
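[Editor's note] MiniHBaseCluster exposes a small surface: construct with a Configuration and a region-server count, read back the master's actual address, and shut down. A hedged usage sketch follows; the test class name and assertion are hypothetical illustrations, not part of this patch:

// Hypothetical JUnit 3 sketch (not in this patch) showing how a test
// might drive MiniHBaseCluster. Assumes the classes in this commit.
package org.apache.hadoop.hbase;

import junit.framework.TestCase;

public class ExampleMiniHBaseClusterTest extends TestCase {
  private MiniHBaseCluster cluster;

  protected void setUp() throws Exception {
    super.setUp();
    // Two region servers; the constructor blocks until the master is
    // initialized, or throws RuntimeException if startup crashes.
    cluster = new MiniHBaseCluster(new HBaseConfiguration(), 2);
  }

  protected void tearDown() throws Exception {
    // Stops region servers and master, then deletes the DFS test data.
    cluster.shutdown();
    super.tearDown();
  }

  public void testMasterCameUp() {
    // The master reports the rpc address it actually bound to, since
    // "localhost:0" requests an ephemeral port.
    assertNotNull(cluster.getHMasterAddress());
  }
}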
@@ -0,0 +1,191 @@
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.TreeMap;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HMemcache.Snapshot;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;

public class TestHMemcache extends TestCase {
  private final Logger LOG =
    Logger.getLogger(this.getClass().getName());

  private HMemcache hmemcache;

  private Configuration conf;

  private static final int ROW_COUNT = 3;

  private static final int COLUMNS_COUNT = 3;

  private static final String COLUMN_FAMILY = "column";

  protected void setUp() throws Exception {
    super.setUp();

    this.hmemcache = new HMemcache();

    // Set up a configuration that has configuration for a file
    // filesystem implementation.
    this.conf = new HBaseConfiguration();
    // The test hadoop-site.xml doesn't have a default file fs
    // implementation. Remove below when it gets added.
    this.conf.set("fs.file.impl",
        "org.apache.hadoop.fs.LocalFileSystem");
  }

  protected void tearDown() throws Exception {
    super.tearDown();
  }

  private Text getRowName(final int index) {
    return new Text("row" + Integer.toString(index));
  }

  private Text getColumnName(final int rowIndex,
      final int colIndex) {
    return new Text(COLUMN_FAMILY + ":" +
        Integer.toString(rowIndex) + ";" +
        Integer.toString(colIndex));
  }

  /**
   * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT} columns per row.
   * @param hmc Instance to add rows to.
   */
  private void addRows(final HMemcache hmc) {
    for (int i = 0; i < ROW_COUNT; i++) {
      TreeMap<Text, byte[]> columns = new TreeMap<Text, byte[]>();
      for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
        Text k = getColumnName(i, ii);
        columns.put(k, k.toString().getBytes());
      }
      hmc.add(getRowName(i), columns, System.currentTimeMillis());
    }
  }

  private HLog getLogfile() throws IOException {
    // Create a log file.
    Path testDir = new Path(conf.get("hadoop.tmp.dir", System
        .getProperty("java.tmp.dir")), "hbase");
    Path logFile = new Path(testDir, this.getName());
    FileSystem fs = testDir.getFileSystem(conf);
    // Cleanup any old log file.
    if (fs.exists(logFile)) {
      fs.delete(logFile);
    }
    return new HLog(fs, logFile, this.conf);
  }

  private Snapshot runSnapshot(final HMemcache hmc, final HLog log)
      throws IOException {
    // Save off old state.
    int oldHistorySize = hmc.history.size();
    TreeMap<HStoreKey, BytesWritable> oldMemcache = hmc.memcache;
    // Run snapshot.
    Snapshot s = hmc.snapshotMemcacheForLog(log);
    // Make some assertions about what just happened.
    assertEquals("Snapshot equals old memcache", hmc.snapshot,
        oldMemcache);
    assertEquals("Returned snapshot holds old memcache",
        s.memcacheSnapshot, oldMemcache);
    assertEquals("History has been incremented",
        oldHistorySize + 1, hmc.history.size());
    assertEquals("History holds old snapshot",
        hmc.history.get(oldHistorySize), oldMemcache);
    return s;
  }

  public void testSnapshotting() throws IOException {
    final int snapshotCount = 5;
    final Text tableName = new Text(getName());
    HLog log = getLogfile();
    try {
      // Add some rows, run a snapshot. Do it a few times.
      for (int i = 0; i < snapshotCount; i++) {
        addRows(this.hmemcache);
        Snapshot s = runSnapshot(this.hmemcache, log);
        log.completeCacheFlush(new Text(Integer.toString(i)),
            tableName, s.sequenceId);
        // Clean up snapshot now we are done with it.
        this.hmemcache.deleteSnapshot();
      }
      log.close();
    } finally {
      log.dir.getFileSystem(this.conf).delete(log.dir);
    }
  }

  private void isExpectedRow(final int rowIndex,
      TreeMap<Text, byte[]> row) {
    int i = 0;
    for (Text colname: row.keySet()) {
      String expectedColname =
        getColumnName(rowIndex, i++).toString();
      String colnameStr = colname.toString();
      assertEquals("Column name", colnameStr, expectedColname);
      // Value is column name as bytes. Usually result is
      // 100 bytes in size at least. This is the default size
      // for BytesWritable. For comparison, convert bytes to
      // String and trim to remove trailing null bytes.
      String colvalueStr =
        new String(row.get(colname)).trim();
      assertEquals("Content", colnameStr, colvalueStr);
    }
  }

  public void testGetFull() throws IOException {
    addRows(this.hmemcache);
    for (int i = 0; i < ROW_COUNT; i++) {
      HStoreKey hsk = new HStoreKey(getRowName(i));
      TreeMap<Text, byte[]> all = this.hmemcache.getFull(hsk);
      isExpectedRow(i, all);
    }
  }

  public void testScanner() throws IOException {
    addRows(this.hmemcache);
    long timestamp = System.currentTimeMillis();
    Text [] cols = new Text[COLUMNS_COUNT * ROW_COUNT];
    for (int i = 0; i < ROW_COUNT; i++) {
      for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
        cols[(ii + (i * COLUMNS_COUNT))] = getColumnName(i, ii);
      }
    }
    HScannerInterface scanner =
      this.hmemcache.getScanner(timestamp, cols, new Text());
    HStoreKey key = new HStoreKey();
    TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
    for (int i = 0; scanner.next(key, results); i++) {
      assertTrue("Row name",
          key.toString().startsWith(getRowName(i).toString()));
      assertEquals("Count of columns", COLUMNS_COUNT,
          results.size());
      isExpectedRow(i, results);
      // Clear out set. Otherwise row results accumulate.
      results.clear();
    }
  }
}
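[Editor's note] testSnapshotting above walks the memcache flush protocol end to end: snapshot, log the completed flush, drop the snapshot. A condensed sketch of that sequence, pulled out of the test; the helper and its parameter names are ours, not the patch's, and it uses only calls the test itself makes:

// Hypothetical helper (not in this patch) restating the flush sequence
// that testSnapshotting verifies.
static void flushMemcache(HMemcache memcache, HLog log,
    Text regionName, Text tableName) throws IOException {
  // 1. Swap the live memcache into the snapshot slot; the old memcache
  //    is also appended to the history list.
  HMemcache.Snapshot s = memcache.snapshotMemcacheForLog(log);
  // 2. A real region would persist s.memcacheSnapshot to an HStore here.
  // 3. Record in the write-ahead log that the flush completed, keyed by
  //    the sequence id handed back with the snapshot.
  log.completeCacheFlush(regionName, tableName, s.sequenceId);
  // 4. Release the snapshot once its contents are safely on disk.
  memcache.deleteSnapshot();
}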
@@ -21,7 +21,10 @@ import junit.framework.TestSuite;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Enumeration;
 import java.util.Iterator;
+import java.util.List;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,6 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
 import org.apache.log4j.Logger;
 import org.apache.log4j.Level;
 import org.apache.log4j.PatternLayout;
@@ -41,6 +47,7 @@ import org.apache.log4j.PatternLayout;
  * HRegions or in the HBaseMaster, so only basic testing is possible.
  */
 public class TestHRegion extends TestCase {
+  private Logger LOG = Logger.getLogger(this.getClass().getName());
 
   /** Constructor */
   public TestHRegion(String name) {
@@ -51,6 +58,8 @@ public class TestHRegion extends TestCase {
   public static Test suite() {
     TestSuite suite = new TestSuite();
     suite.addTest(new TestHRegion("testSetup"));
+    suite.addTest(new TestHRegion("testLocks"));
+    suite.addTest(new TestHRegion("testBadPuts"));
     suite.addTest(new TestHRegion("testBasic"));
     suite.addTest(new TestHRegion("testScan"));
     suite.addTest(new TestHRegion("testBatchWrite"));
@@ -61,7 +70,7 @@ public class TestHRegion extends TestCase {
   }
 
 
-  private static final int FIRST_ROW = 0;
+  private static final int FIRST_ROW = 1;
   private static final int N_ROWS = 1000000;
   private static final int NUM_VALS = 1000;
   private static final Text CONTENTS_BASIC = new Text("contents:basic");
@@ -88,28 +97,42 @@ public class TestHRegion extends TestCase {
 
   // Set up environment, start mini cluster, etc.
 
+  @SuppressWarnings("unchecked")
   public void testSetup() throws IOException {
     try {
-      if (System.getProperty("test.build.data") == null) {
+      if(System.getProperty("test.build.data") == null) {
         String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
         System.out.println(dir);
         System.setProperty("test.build.data", dir);
       }
-      conf = new Configuration();
+      conf = new HBaseConfiguration();
 
       Environment.getenv();
-      if (Environment.debugging) {
+      if(Environment.debugging) {
         Logger rootLogger = Logger.getRootLogger();
         rootLogger.setLevel(Level.WARN);
 
-        PatternLayout consoleLayout
-          = (PatternLayout)rootLogger.getAppender("console").getLayout();
-        consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+        ConsoleAppender consoleAppender = null;
+        for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
+            e.hasMoreElements();) {
+          Appender a = e.nextElement();
+          if(a instanceof ConsoleAppender) {
+            consoleAppender = (ConsoleAppender)a;
+            break;
+          }
+        }
+        if(consoleAppender != null) {
+          Layout layout = consoleAppender.getLayout();
+          if(layout instanceof PatternLayout) {
+            PatternLayout consoleLayout = (PatternLayout)layout;
+            consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
+          }
+        }
         Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
       }
 
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
       fs = cluster.getFileSystem();
       parentdir = new Path("/hbase");
       fs.mkdirs(parentdir);
@@ -118,10 +141,10 @@ public class TestHRegion extends TestCase {
 
       log = new HLog(fs, newlogdir, conf);
       desc = new HTableDescriptor("test", 3);
-      desc.addFamily(new Text("contents"));
-      desc.addFamily(new Text("anchor"));
+      desc.addFamily(new Text("contents:"));
+      desc.addFamily(new Text("anchor:"));
       region = new HRegion(parentdir, log, fs, conf,
           new HRegionInfo(1, desc, null, null), null, oldlogfile);
 
     } catch(IOException e) {
       failures = true;
@@ -133,26 +156,39 @@ public class TestHRegion extends TestCase {
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
 
   public void testBasic() throws IOException {
-    if (!initialized) {
+    if(!initialized) {
       throw new IllegalStateException();
     }
 
     try {
+      long startTime = System.currentTimeMillis();
+
       // Write out a bunch of values
 
-      for (int k = 0; k < NUM_VALS; k++) {
+      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
         long writeid = region.startUpdate(new Text("row_" + k));
         region.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
         region.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
         region.commit(writeid);
       }
+      System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+      // Flush cache
+
+      startTime = System.currentTimeMillis();
+
       region.flushcache(false);
+
+      System.out.println("Cache flush elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
       // Read them back in
+
+      startTime = System.currentTimeMillis();
+
       Text collabel = null;
-      for (int k = 0; k < NUM_VALS; k++) {
+      for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
         Text rowlabel = new Text("row_" + k);
 
         byte bodydata[] = region.get(rowlabel, CONTENTS_BASIC);
@@ -160,44 +196,127 @@ public class TestHRegion extends TestCase {
         String bodystr = new String(bodydata).toString().trim();
         String teststr = CONTENTSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
             + "), expected: '" + teststr + "' got: '" + bodystr + "'",
             bodystr, teststr);
         collabel = new Text(ANCHORNUM + k);
         bodydata = region.get(rowlabel, collabel);
         bodystr = new String(bodydata).toString().trim();
         teststr = ANCHORSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
             + "), expected: '" + teststr + "' got: '" + bodystr + "'",
             bodystr, teststr);
-        /*
-        // Check to make sure that null values are actually null
-        for (int j = 0; j < Math.min(15, NUM_VALS); j++) {
-          if (k != j) {
-            collabel = new Text(ANCHORNUM + j);
-            byte results[] = region.get(rowlabel, collabel);
-            if (results != null) {
-              throw new IOException("Found incorrect value at [" + rowlabel + ", " + collabel + "] == " + new String(results).toString().trim());
-            }
-          }
-        }
-        */
       }
+
+      System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     } catch(IOException e) {
       failures = true;
       throw e;
     }
   }
 
+  public void testBadPuts() throws IOException {
+    if(!initialized) {
+      throw new IllegalStateException();
+    }
+
+    // Try put with bad lockid.
+    boolean exceptionThrown = false;
+    try {
+      region.put(-1, CONTENTS_BASIC, "bad input".getBytes());
+    } catch (LockException e) {
+      exceptionThrown = true;
+    }
+    assertTrue("Bad lock id", exceptionThrown);
+
+    // Try column name not registered in the table.
+    exceptionThrown = false;
+    long lockid = -1;
+    try {
+      lockid = region.startUpdate(new Text("Some old key"));
+      String unregisteredColName = "FamilyGroup:FamilyLabel";
+      region.put(lockid, new Text(unregisteredColName),
+          unregisteredColName.getBytes());
+    } catch (IOException e) {
+      exceptionThrown = true;
+    } finally {
+      if (lockid != -1) {
+        region.abort(lockid);
+      }
+    }
+    assertTrue("Bad family", exceptionThrown);
+  }
+
+  /**
+   * Test getting and releasing locks.
+   */
+  public void testLocks() {
+    final int threadCount = 10;
+    final int lockCount = 10;
+
+    List<Thread>threads = new ArrayList<Thread>(threadCount);
+    for (int i = 0; i < threadCount; i++) {
+      threads.add(new Thread(Integer.toString(i)) {
+        public void run() {
+          long [] lockids = new long[lockCount];
+          // Get locks.
+          for (int i = 0; i < lockCount; i++) {
+            try {
+              Text rowid = new Text(Integer.toString(i));
+              lockids[i] = region.obtainLock(rowid);
+              rowid.equals(region.getRowFromLock(lockids[i]));
+              LOG.debug(getName() + " locked " + rowid.toString());
+            } catch (IOException e) {
+              e.printStackTrace();
+            }
+          }
+          LOG.debug(getName() + " set " +
+              Integer.toString(lockCount) + " locks");
+
+          // Abort outstanding locks.
+          for (int i = lockCount - 1; i >= 0; i--) {
+            try {
+              region.abort(lockids[i]);
+              LOG.debug(getName() + " unlocked " +
+                  Integer.toString(i));
+            } catch (IOException e) {
+              e.printStackTrace();
+            }
+          }
+          LOG.debug(getName() + " released " +
+              Integer.toString(lockCount) + " locks");
+        }
+      });
+    }
+
+    // Startup all our threads.
+    for (Thread t : threads) {
+      t.start();
+    }
+
+    // Now wait around till all are done.
+    for (Thread t: threads) {
+      while (t.isAlive()) {
+        try {
+          Thread.sleep(1);
+        } catch (InterruptedException e) {
+          // Go around again.
+        }
+      }
+    }
+  }
+
   // Test scanners. Writes contents:firstcol and anchor:secondcol
 
   public void testScan() throws IOException {
-    if (!initialized) {
+    if(!initialized) {
       throw new IllegalStateException();
     }
 
     Text cols[] = new Text[] {
       CONTENTS_FIRSTCOL,
       ANCHOR_SECONDCOL
     };
 
     // Test the Scanner!!!
@@ -207,6 +326,9 @@ public class TestHRegion extends TestCase {
     }
 
     // 1. Insert a bunch of values
+
+    long startTime = System.currentTimeMillis();
+
     for(int k = 0; k < vals1.length / 2; k++) {
       String kLabel = String.format("%1$03d", k);
 
@@ -217,7 +339,13 @@ public class TestHRegion extends TestCase {
       numInserted += 2;
     }
 
-    // 2. Scan
+    System.out.println("Write " + (vals1.length / 2) + " elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 2. Scan from cache
+
+    startTime = System.currentTimeMillis();
+
     HScannerInterface s = region.getScanner(cols, new Text());
     int numFetched = 0;
     try {
@@ -225,16 +353,16 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if (col.compareTo(cols[j]) == 0) {
+            if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                   + ", Value for " + col + " should be: " + k
                   + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -247,10 +375,23 @@ public class TestHRegion extends TestCase {
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+    System.out.println("Scanned " + (vals1.length / 2)
+        + " rows from cache. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     // 3. Flush to disk
+
+    startTime = System.currentTimeMillis();
+
     region.flushcache(false);
 
-    // 4. Scan
+    System.out.println("Cache flush elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 4. Scan from disk
+
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text());
     numFetched = 0;
     try {
@@ -258,16 +399,16 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if (col.compareTo(cols[j]) == 0) {
+            if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                   + ", Value for " + col + " should be: " + k
                   + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -280,7 +421,14 @@ public class TestHRegion extends TestCase {
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+    System.out.println("Scanned " + (vals1.length / 2)
+        + " rows from disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     // 5. Insert more values
+
+    startTime = System.currentTimeMillis();
+
     for(int k = vals1.length/2; k < vals1.length; k++) {
       String kLabel = String.format("%1$03d", k);
 
@@ -291,7 +439,13 @@ public class TestHRegion extends TestCase {
       numInserted += 2;
     }
 
-    // 6. Scan
+    System.out.println("Write " + (vals1.length / 2) + " rows. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 6. Scan from cache and disk
+
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text());
     numFetched = 0;
     try {
@@ -299,16 +453,16 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if (col.compareTo(cols[j]) == 0) {
+            if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                   + ", Value for " + col + " should be: " + k
                   + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -321,10 +475,23 @@ public class TestHRegion extends TestCase {
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+    System.out.println("Scanned " + vals1.length
+        + " rows from cache and disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     // 7. Flush to disk
+
+    startTime = System.currentTimeMillis();
+
     region.flushcache(false);
 
-    // 8. Scan
+    System.out.println("Cache flush elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+    // 8. Scan from disk
+
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text());
     numFetched = 0;
     try {
@@ -332,7 +499,7 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -340,7 +507,7 @@ public class TestHRegion extends TestCase {
         for (int j = 0; j < cols.length; j++) {
           if (col.compareTo(cols[j]) == 0) {
             assertEquals("Value for " + col + " should be: " + k
                 + ", but was fetched as: " + curval, curval, k);
             numFetched++;
           }
         }
@@ -353,8 +520,14 @@ public class TestHRegion extends TestCase {
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+    System.out.println("Scanned " + vals1.length
+        + " rows from disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     // 9. Scan with a starting point
+
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text("row_vals1_500"));
     numFetched = 0;
     try {
@@ -362,7 +535,7 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 500;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -370,7 +543,7 @@ public class TestHRegion extends TestCase {
         for (int j = 0; j < cols.length; j++) {
           if (col.compareTo(cols[j]) == 0) {
             assertEquals("Value for " + col + " should be: " + k
                 + ", but was fetched as: " + curval, curval, k);
             numFetched++;
           }
         }
@@ -383,6 +556,9 @@ public class TestHRegion extends TestCase {
     }
     assertEquals("Should have fetched " + (numInserted / 2) + " values, but fetched " + numFetched, (numInserted / 2), numFetched);
+
+    System.out.println("Scanned " + (numFetched / 2)
+        + " rows from disk with specified start point. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
   }
 
   // Do a large number of writes. Disabled if not debugging because it takes a
@@ -390,10 +566,10 @@ public class TestHRegion extends TestCase {
   // Creates contents:body
 
   public void testBatchWrite() throws IOException {
-    if (!initialized || failures) {
+    if(!initialized || failures) {
       throw new IllegalStateException();
     }
-    if (!Environment.debugging) {
+    if(! Environment.debugging) {
       return;
     }
 
@@ -406,7 +582,7 @@ public class TestHRegion extends TestCase {
       // 1M writes
 
       int valsize = 1000;
-      for (int k = FIRST_ROW; k < N_ROWS; k++) {
+      for (int k = FIRST_ROW; k <= N_ROWS; k++) {
         // Come up with a random 1000-byte string
         String randstr1 = "" + System.currentTimeMillis();
         StringBuffer buf1 = new StringBuffer("val_" + k + "__");
@@ -437,7 +613,7 @@ public class TestHRegion extends TestCase {
         }
       }
       long startCompact = System.currentTimeMillis();
-      if (region.compactStores()) {
+      if(region.compactStores()) {
         totalCompact = System.currentTimeMillis() - startCompact;
         System.out.println("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
 
@@ -457,7 +633,8 @@ public class TestHRegion extends TestCase {
       System.out.println("Total time, rows/second: " + (N_ROWS / (totalElapsed / 1000.0)));
       System.out.println("Adjusted time (not including flush, compact, or log): " + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
       System.out.println("Adjusted time, rows/second: " + (N_ROWS / ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
+      System.out.println();
 
     } catch(IOException e) {
       failures = true;
       throw e;
@@ -467,14 +644,14 @@ public class TestHRegion extends TestCase {
   // NOTE: This test depends on testBatchWrite succeeding
 
   public void testSplitAndMerge() throws IOException {
-    if (!initialized || failures) {
+    if(!initialized || failures) {
       throw new IllegalStateException();
     }
 
     try {
       Text midKey = new Text();
 
-      if (region.needsSplit(midKey)) {
+      if(region.needsSplit(midKey)) {
        System.out.println("Needs split");
       }
 
@@ -482,15 +659,28 @@ public class TestHRegion extends TestCase {
 
       Text midkey = new Text("row_" + (Environment.debugging ? (N_ROWS / 2) : (NUM_VALS/2)));
       Path oldRegionPath = region.getRegionDir();
+
+      long startTime = System.currentTimeMillis();
+
       HRegion subregions[] = region.closeAndSplit(midkey);
+
+      System.out.println("Split region elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
       assertEquals("Number of subregions", subregions.length, 2);
 
       // Now merge it back together
 
       Path oldRegion1 = subregions[0].getRegionDir();
       Path oldRegion2 = subregions[1].getRegionDir();
+
+      startTime = System.currentTimeMillis();
+
      region = HRegion.closeAndMerge(subregions[0], subregions[1]);
+
+      System.out.println("Merge regions elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
+
       fs.delete(oldRegionPath);
       fs.delete(oldRegion1);
       fs.delete(oldRegion2);
@@ -504,17 +694,19 @@ public class TestHRegion extends TestCase {
   // This test verifies that everything is still there after splitting and merging
 
   public void testRead() throws IOException {
-    if (!initialized || failures) {
+    if(!initialized || failures) {
       throw new IllegalStateException();
     }
 
     // First verify the data written by testBasic()
 
     Text[] cols = new Text[] {
       new Text(ANCHORNUM + "[0-9]+"),
       new Text(CONTENTS_BASIC)
     };
 
+    long startTime = System.currentTimeMillis();
+
     HScannerInterface s = region.getScanner(cols, new Text());
 
     try {
@@ -525,23 +717,23 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           String curval = new String(val).trim();
 
-          if (col.compareTo(CONTENTS_BASIC) == 0) {
+          if(col.compareTo(CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                 + ", Value for " + col + " should start with: " + CONTENTSTR
                 + ", but was fetched as: " + curval,
                 curval.startsWith(CONTENTSTR));
             contentsFetched++;
 
-          } else if (col.toString().startsWith(ANCHORNUM)) {
+          } else if(col.toString().startsWith(ANCHORNUM)) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                 + ", Value for " + col + " should start with: " + ANCHORSTR
                 + ", but was fetched as: " + curval,
                 curval.startsWith(ANCHORSTR));
             anchorFetched++;
 
           } else {
@@ -554,6 +746,10 @@ public class TestHRegion extends TestCase {
       assertEquals("Expected " + NUM_VALS + " " + CONTENTS_BASIC + " values, but fetched " + contentsFetched, NUM_VALS, contentsFetched);
       assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM + " values, but fetched " + anchorFetched, NUM_VALS, anchorFetched);
+
+      System.out.println("Scanned " + NUM_VALS
+          + " rows from disk. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     } finally {
       s.close();
     }
@@ -561,9 +757,11 @@ public class TestHRegion extends TestCase {
     // Verify testScan data
 
     cols = new Text[] {
      CONTENTS_FIRSTCOL,
       ANCHOR_SECONDCOL
     };
 
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text());
     try {
@@ -572,7 +770,7 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -580,7 +778,7 @@ public class TestHRegion extends TestCase {
         for (int j = 0; j < cols.length; j++) {
           if (col.compareTo(cols[j]) == 0) {
             assertEquals("Value for " + col + " should be: " + k
                 + ", but was fetched as: " + curval, curval, k);
             numFetched++;
           }
         }
@@ -590,13 +788,19 @@ public class TestHRegion extends TestCase {
     }
     assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+    System.out.println("Scanned " + (numFetched / 2)
+        + " rows from disk. Elapsed time: "
+        + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     } finally {
       s.close();
     }
 
     // Verify testBatchWrite data
 
-    if (Environment.debugging) {
+    if(Environment.debugging) {
+      startTime = System.currentTimeMillis();
+
       s = region.getScanner(new Text[] { CONTENTS_BODY }, new Text());
       try {
         int numFetched = 0;
@@ -604,7 +808,7 @@ public class TestHRegion extends TestCase {
         TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
         int k = 0;
         while(s.next(curKey, curVals)) {
-          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
             Text col = it.next();
             byte val[] = curVals.get(col);
@@ -617,6 +821,10 @@ public class TestHRegion extends TestCase {
         }
         assertEquals("Inserted " + N_ROWS + " values, but fetched " + numFetched, N_ROWS, numFetched);
+
+        System.out.println("Scanned " + N_ROWS
+            + " rows from disk. Elapsed time: "
+            + ((System.currentTimeMillis() - startTime) / 1000.0));
 
       } finally {
         s.close();
       }
@@ -625,9 +833,11 @@ public class TestHRegion extends TestCase {
     // Test a scanner which only specifies the column family name
 
     cols = new Text[] {
       new Text("anchor:")
     };
 
+    startTime = System.currentTimeMillis();
+
     s = region.getScanner(cols, new Text());
 
     try {
@@ -635,7 +845,7 @@ public class TestHRegion extends TestCase {
       HStoreKey curKey = new HStoreKey();
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
           it.next();
           fetched++;
         }
@@ -643,6 +853,10 @@ public class TestHRegion extends TestCase {
       }
       assertEquals("Inserted " + (NUM_VALS + numInserted/2) + " values, but fetched " + fetched, (NUM_VALS + numInserted/2), fetched);
+
+      System.out.println("Scanned " + fetched
+          + " rows from disk. Elapsed time: "
+          + ((System.currentTimeMillis() - startTime) / 1000.0));
 
     } finally {
       s.close();
     }
@@ -650,7 +864,7 @@ public class TestHRegion extends TestCase {
 
   private static void deleteFile(File f) {
-    if (f.isDirectory()) {
+    if(f.isDirectory()) {
       File[] children = f.listFiles();
       for(int i = 0; i < children.length; i++) {
         deleteFile(children[i]);
@@ -660,7 +874,7 @@ public class TestHRegion extends TestCase {
   }
 
   public void testCleanup() throws IOException {
-    if (!initialized) {
+    if(!initialized) {
       throw new IllegalStateException();
     }
 
@@ -672,5 +886,5 @@ public class TestHRegion extends TestCase {
 
     deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
 
   }
 }
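[Editor's note] Most of what this patch adds to TestHRegion is one repeated instrumentation idiom: capture System.currentTimeMillis() before a phase and print elapsed seconds after it. Factored out it looks like the sketch below; the helper is ours, not the patch's, which deliberately inlines the pattern at every call site:

// Hypothetical helper (not in this patch) capturing the timing idiom
// the diff repeats around each write/flush/scan phase.
static void timed(String label, Runnable phase) {
  long startTime = System.currentTimeMillis();
  phase.run();
  // Elapsed time is printed in seconds, matching the patch's output format.
  System.out.println(label + " Elapsed time: "
      + ((System.currentTimeMillis() - startTime) / 1000.0));
}

// e.g. timed("Cache flush.", new Runnable() {
//   public void run() { /* region.flushcache(false) */ }
// });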