HBASE-7234 Remove long-deprecated HServerAddress and HServerInfo Writables

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1415766 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2012-11-30 17:50:44 +00:00
parent f5a3a12b24
commit ac3c5eb6fa
25 changed files with 161 additions and 929 deletions

View File

@ -97,15 +97,6 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
return regionInfo;
}
/**
* Do not use!!! Creates an HServerAddress instance, which will do a resolve.
* @return HServerAddress
* @deprecated Use {@link #getHostnamePort}
*/
public HServerAddress getServerAddress() {
return new HServerAddress(this.hostname, this.port);
}
public String getHostname() {
return this.hostname;
}
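With getServerAddress() gone, callers read the hostname and port straight off HRegionLocation. A minimal migration sketch (the describe helper is hypothetical; only the accessors are from this class):

import org.apache.hadoop.hbase.HRegionLocation;

public class LocationMigration {
  // Before: location.getServerAddress() built an HServerAddress and forced a DNS resolve.
  // After: the already-known hostname and port are returned as-is, no resolve.
  static String describe(HRegionLocation location) {
    return location.getHostname() + ":" + location.getPort();
  }
}

HRegionLocation.getHostnamePort() returns the same "host:port" form directly.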

View File

@ -1,199 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.WritableComparable;
/**
* HServerAddress hosts an {@link InetSocketAddress} and makes it
* {@link WritableComparable}. It resolves on construction AND on
* deserialization -- since we're internally creating an InetSocketAddress --
* so it could end up with different results if the two ends of serialization
* have different resolvers. Be careful where you use it. It should only be
* used when you need to pass an InetSocketAddress across an RPC; even then
* it's a bad idea because of the above resolve issue.
* @deprecated Use {@link InetSocketAddress} or {@link ServerName} or
* a hostname String and port.
*/
public class HServerAddress implements WritableComparable<HServerAddress> {
// Hard to deprecate this class. It's in the API as an internal class,
// in particular as an inner class of HRegionLocation. Besides, sometimes
// we do want to serialize an InetSocketAddress; this class can be used then.
private InetSocketAddress address = null;
private String cachedToString = "";
/**
* Constructor for deserialization use only.
*/
public HServerAddress() {
super();
}
/**
* Construct an instance from an {@link InetSocketAddress}.
* @param address InetSocketAddress of server
*/
public HServerAddress(InetSocketAddress address) {
this.address = address;
checkBindAddressCanBeResolved();
this.cachedToString = createCachedToString();
}
private String createCachedToString() {
return this.address.toString();
}
/**
* @param hostname Hostname
* @param port Port number
*/
public HServerAddress(final String hostname, final int port) {
this(getResolvedAddress(new InetSocketAddress(hostname, port)));
}
/**
* Copy-constructor.
* @param other HServerAddress to copy from
*/
public HServerAddress(HServerAddress other) {
this(getResolvedAddress(new InetSocketAddress(other.getHostname(), other.getPort())));
}
private static InetSocketAddress getResolvedAddress(InetSocketAddress address) {
String bindAddress = getBindAddressInternal(address);
int port = address.getPort();
return new InetSocketAddress(bindAddress, port);
}
/** @return Bind address -- the raw IP, the result of a call to
* InetSocketAddress#getAddress()#getHostAddress() --
* or null if it cannot be resolved */
public String getBindAddress() {
return getBindAddressInternal(address);
}
private static String getBindAddressInternal(InetSocketAddress address) {
final InetAddress addr = address.getAddress();
if (addr != null) {
return addr.getHostAddress();
} else {
LogFactory.getLog(HServerAddress.class).error("Could not resolve the"
+ " DNS name of " + address.getHostName());
return null;
}
}
private void checkBindAddressCanBeResolved() {
if (getBindAddress() == null) {
throw new IllegalArgumentException("Could not resolve the"
+ " DNS name of " + this.address.toString());
}
}
/** @return Port number */
public int getPort() {
return this.address.getPort();
}
/** @return Hostname */
public String getHostname() {
// Kerberos is case-sensitive, and dictates that, where hostnames are
// case-insensitive (as in DNS), the lowercase version must be used
// So here we lowercase to properly interact with kerberos auth
return this.address.getHostName().toLowerCase();
}
/**
* @return String formatted as <code>&lt;hostname> ':' &lt;port></code>
*/
public String getHostnameAndPort() {
return getHostname() + ":" + getPort();
}
/** @return The InetSocketAddress */
public InetSocketAddress getInetSocketAddress() {
return this.address;
}
/**
* @return String formatted as <code>&lt;bind address> ':' &lt;port></code>
*/
@Override
public String toString() {
return this.cachedToString;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null) return false;
if (getClass() != o.getClass()) return false;
return compareTo((HServerAddress)o) == 0;
}
@Override
public int hashCode() {
int result = address == null? 0: address.hashCode();
result ^= toString().hashCode();
return result;
}
//
// Writable
//
public void readFields(DataInput in) throws IOException {
String hostname = in.readUTF();
int port = in.readInt();
if (hostname != null && hostname.length() > 0) {
this.address = getResolvedAddress(new InetSocketAddress(hostname, port));
checkBindAddressCanBeResolved();
createCachedToString();
}
}
public void write(DataOutput out) throws IOException {
if (this.address == null) {
out.writeUTF("");
out.writeInt(0);
} else {
out.writeUTF(this.address.getAddress().getHostName());
out.writeInt(this.address.getPort());
}
}
//
// Comparable
//
public int compareTo(HServerAddress o) {
// Addresses rendered as Strings may not compare equal even though they
// are for the same server, the only difference being that one has the
// hostname resolved whereas the other only has the IP.
if (this.address.equals(o.address)) return 0;
return toString().compareTo(o.toString());
}
}
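As the @deprecated note above suggests, an unresolved InetSocketAddress or a plain hostname String and port cover the old uses without the resolve-on-deserialization hazard. A hedged sketch (helper names are hypothetical):

import java.net.InetSocketAddress;

public class AddressMigration {
  // Carry the address without triggering DNS; resolve explicitly, once,
  // only at the point a connection is actually opened.
  static InetSocketAddress carry(String hostname, int port) {
    return InetSocketAddress.createUnresolved(hostname, port);
  }

  static InetSocketAddress resolveForConnect(InetSocketAddress unresolved) {
    // The resolving constructor; this is the step HServerAddress ran
    // implicitly on construction AND again on readFields().
    return new InetSocketAddress(unresolved.getHostName(), unresolved.getPort());
  }
}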

View File

@ -1,150 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.io.VersionedWritable;
import org.apache.hadoop.io.WritableComparable;
/**
* HServerInfo is meta info about an {@link HRegionServer}. It hosts the
* {@link HServerAddress}, its webui port, and its server startcode. It was
* used to pass meta info about a server across an RPC but we've since made
* it so regionserver info is up in ZooKeeper and so this class is on its
* way out. It used to carry HServerLoad but as of HBase 0.92.0, the
* HServerLoad is passed independent of this class. Also, we now no longer pass
* the webui from regionserver to master (TODO: Fix).
* @deprecated Use {@link InetSocketAddress} and/or {@link ServerName} and/or
* HServerLoad
*/
public class HServerInfo extends VersionedWritable
implements WritableComparable<HServerInfo> {
private static final byte VERSION = 1;
private HServerAddress serverAddress = new HServerAddress();
private long startCode;
private int webuiport;
public HServerInfo() {
super();
}
/**
* Constructor that creates an HServerInfo with a generated startcode.
* @param serverAddress Address of the server.
* @param webuiport Port the webui runs on.
*/
public HServerInfo(final HServerAddress serverAddress, final int webuiport) {
this(serverAddress, System.currentTimeMillis(), webuiport);
}
public HServerInfo(HServerAddress serverAddress, long startCode,
final int webuiport) {
this.serverAddress = serverAddress;
this.startCode = startCode;
this.webuiport = webuiport;
}
/**
* Copy-constructor.
* @param other HServerInfo to copy from.
*/
public HServerInfo(HServerInfo other) {
this.serverAddress = new HServerAddress(other.getServerAddress());
this.startCode = other.getStartCode();
this.webuiport = other.getInfoPort();
}
/** @return the object version number */
public byte getVersion() {
return VERSION;
}
public synchronized HServerAddress getServerAddress() {
return new HServerAddress(serverAddress);
}
public synchronized long getStartCode() {
return startCode;
}
public int getInfoPort() {
return getWebuiPort();
}
public int getWebuiPort() {
return this.webuiport;
}
public String getHostname() {
return this.serverAddress.getHostname();
}
/**
* @return Hostname, port and startcode concatenated as a server name.
*/
@Override
public synchronized String toString() {
return ServerName.getServerName(this.serverAddress.getHostnameAndPort(),
this.startCode);
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
return compareTo((HServerInfo)obj) == 0;
}
@Override
public int hashCode() {
int code = this.serverAddress.hashCode();
code ^= this.webuiport;
code ^= this.startCode;
return code;
}
public void readFields(DataInput in) throws IOException {
super.readFields(in);
this.serverAddress.readFields(in);
this.startCode = in.readLong();
this.webuiport = in.readInt();
}
public void write(DataOutput out) throws IOException {
super.write(out);
this.serverAddress.write(out);
out.writeLong(this.startCode);
out.writeInt(this.webuiport);
}
public int compareTo(HServerInfo o) {
int compare = this.serverAddress.compareTo(o.getServerAddress());
if (compare != 0) return compare;
if (this.webuiport != o.getInfoPort()) return this.webuiport - o.getInfoPort();
if (this.startCode != o.getStartCode()) return (int)(this.startCode - o.getStartCode());
return 0;
}
}
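ServerName already carries the hostname, port and startcode that HServerInfo wrapped; only the webui port has no replacement field (see the TODO in the class comment above). A sketch, assuming the ServerName(String, int, long) constructor in this tree:

import org.apache.hadoop.hbase.ServerName;

public class ServerInfoMigration {
  static ServerName fromParts(String hostname, int port, long startcode) {
    // hostname + port + startcode uniquely identify a regionserver instance,
    // which is all HServerInfo was still used for.
    return new ServerName(hostname, port, startcode);
  }
}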

View File

@ -28,6 +28,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@ -624,4 +625,20 @@ public class MetaReader {
this.results.add(r);
}
}
/**
* Count regions in <code>.META.</code> for the passed table.
* @param c Configuration to use.
* @param tableName Table to count regions for.
* @return Count of regions in table <code>tableName</code>
* @throws IOException
*/
public static int getRegionCount(final Configuration c, final String tableName) throws IOException {
HTable t = new HTable(c, tableName);
try {
return t.getRegionLocations().size();
} finally {
t.close();
}
}
}
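A usage sketch for the new helper (table name hypothetical); it opens and closes the HTable internally, so the caller has nothing to clean up:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.catalog.MetaReader;

public class RegionCountExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Counts regions for "myTable" by consulting .META.
    int regions = MetaReader.getRegionCount(conf, "myTable");
    System.out.println("myTable currently has " + regions + " regions");
  }
}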

View File

@ -21,16 +21,13 @@ package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterAdminProtocol;
import org.apache.hadoop.hbase.MasterMonitorProtocol;
@ -350,14 +347,6 @@ public interface HConnection extends Abortable, Closeable {
*/
public boolean getRegionCachePrefetch(final byte[] tableName);
/**
* Load the region map and warm up the global region cache for the table.
* @param tableName name of the table whose region cache to prewarm.
* @param regions a region map.
*/
public void prewarmRegionCache(final byte[] tableName,
final Map<HRegionInfo, HServerAddress> regions);
/**
* Scan zookeeper to get the number of region servers
* @return the number of region servers that are currently running

View File

@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterAdminProtocol;
@ -2241,17 +2240,6 @@ public class HConnectionManager {
return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName));
}
@Override
public void prewarmRegionCache(byte[] tableName,
Map<HRegionInfo, HServerAddress> regions) {
for (Map.Entry<HRegionInfo, HServerAddress> e : regions.entrySet()) {
HServerAddress hsa = e.getValue();
if (hsa == null || hsa.getInetSocketAddress() == null) continue;
cacheLocation(tableName,
new HRegionLocation(e.getKey(), hsa.getHostname(), hsa.getPort()));
}
}
@Override
public void abort(final String msg, Throwable t) {
if (t instanceof KeeperException.SessionExpiredException

View File

@ -19,9 +19,6 @@
package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Proxy;
@ -40,9 +37,7 @@ import java.util.concurrent.Future;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.protobuf.Service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@ -52,14 +47,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.io.DataInputInputStream;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
@ -75,11 +68,11 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
/**
@ -478,32 +471,6 @@ public class HTable implements HTableInterface {
endKeyList.toArray(new byte[endKeyList.size()][]));
}
/**
* Gets all the regions and their addresses for this table.
* @return A map of HRegionInfo with its server address
* @throws IOException if a remote or network exception occurs
* @deprecated Use {@link #getRegionLocations()} or {@link #getStartEndKeys()}
*/
@Deprecated
public Map<HRegionInfo, HServerAddress> getRegionsInfo() throws IOException {
final Map<HRegionInfo, HServerAddress> regionMap =
new TreeMap<HRegionInfo, HServerAddress>();
final Map<HRegionInfo, ServerName> regionLocations = getRegionLocations();
for (Map.Entry<HRegionInfo, ServerName> entry : regionLocations.entrySet()) {
HServerAddress server = new HServerAddress();
ServerName serverName = entry.getValue();
if (serverName != null && serverName.getHostAndPort() != null) {
server = new HServerAddress(Addressing.createInetSocketAddressFromHostAndPortStr(
serverName.getHostAndPort()));
}
regionMap.put(entry.getKey(), server);
}
return regionMap;
}
/**
* Gets all the regions and their addresses for this table.
* <p>
@ -512,6 +479,7 @@ public class HTable implements HTableInterface {
* @throws IOException if a remote or network exception occurs
*/
public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
// TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation.
return MetaScanner.allTableRegions(getConfiguration(), getTableName(), false);
}
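getRegionLocations() is the replacement for the removed getRegionsInfo(): same regions, but mapped to ServerName instead of a resolving HServerAddress. A minimal sketch (table name hypothetical):

import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HTable;

public class RegionLocationsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "myTable");
    try {
      // Region -> hosting server, straight from the catalog; no DNS resolve.
      NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations();
      for (Map.Entry<HRegionInfo, ServerName> e : locations.entrySet()) {
        System.out.println(e.getKey().getRegionNameAsString() + " -> " + e.getValue());
      }
    } finally {
      table.close();
    }
  }
}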
@ -544,106 +512,6 @@ public class HTable implements HTableInterface {
return regionList;
}
/**
* Save the passed region information into the table's region
* cache.
* <p>
* This is mainly useful for the MapReduce integration. You can call
* {@link #deserializeRegionInfo deserializeRegionInfo}
* to deserialize regions information from a
* {@link DataInput}, then call this method to load them to cache.
*
* <pre>
* {@code
* HTable t1 = new HTable("foo");
* FileInputStream fis = new FileInputStream("regions.dat");
* DataInputStream dis = new DataInputStream(fis);
*
* Map<HRegionInfo, HServerAddress> hm = t1.deserializeRegionInfo(dis);
* t1.prewarmRegionCache(hm);
* }
* </pre>
* @param regionMap Region information to load into the region
* cache.
*/
public void prewarmRegionCache(Map<HRegionInfo, HServerAddress> regionMap) {
this.connection.prewarmRegionCache(this.getTableName(), regionMap);
}
/**
* Serialize the regions information of this table and output
* to <code>out</code>.
* <p>
* This is mainly useful for the MapReduce integration. A client could
* perform a large scan for all the regions of the table, serializing the
* region info to a file; the MR job can then ship a copy of the table's
* meta in the DistributedCache.
* <pre>
* {@code
* FileOutputStream fos = new FileOutputStream("regions.dat");
* DataOutputStream dos = new DataOutputStream(fos);
* table.serializeRegionInfo(dos);
* dos.flush();
* dos.close();
* }
* </pre>
* @param out {@link DataOutput} to serialize this object into.
* @throws IOException if a remote or network exception occurs
* @deprecated serializing/deserializing region info is deprecated
*/
@Deprecated
public void serializeRegionInfo(DataOutput out) throws IOException {
Map<HRegionInfo, HServerAddress> allRegions = this.getRegionsInfo();
// first, write number of regions
out.writeInt(allRegions.size());
for (Map.Entry<HRegionInfo, HServerAddress> es : allRegions.entrySet()) {
byte[] hriBytes = es.getKey().toDelimitedByteArray();
out.write(hriBytes);
es.getValue().write(out);
}
}
/**
* Read from <code>in</code> and deserialize the regions information.
*
* <p>It behaves similarly to {@link #getRegionsInfo getRegionsInfo}, except
* that it loads the region map from a {@link DataInput} object.
*
* <p>It is supposed to be followed immediately by {@link
* #prewarmRegionCache prewarmRegionCache}.
*
* <p>
* Please refer to {@link #prewarmRegionCache prewarmRegionCache} for usage.
*
* @param in {@link DataInput} object.
* @return A map of HRegionInfo with its server address.
* @throws IOException if an I/O exception occurs.
* @deprecated serializing/deserializing region info is deprecated
*/
@Deprecated
public Map<HRegionInfo, HServerAddress> deserializeRegionInfo(DataInput in)
throws IOException {
final Map<HRegionInfo, HServerAddress> allRegions =
new TreeMap<HRegionInfo, HServerAddress>();
DataInputStream is = null;
if (in instanceof DataInputStream) {
is = (DataInputStream) in;
} else {
is = new DataInputStream(DataInputInputStream.constructInputStream(in));
}
// the first integer is expected to be the number of records
int regionsCount = is.readInt();
for (int i = 0; i < regionsCount; ++i) {
HRegionInfo hri = HRegionInfo.parseFrom(is);
HServerAddress hsa = new HServerAddress();
hsa.readFields(is);
allRegions.put(hri, hsa);
}
return allRegions;
}
/**
* {@inheritDoc}
*/

View File

@ -22,9 +22,6 @@ package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.util.Addressing;
import java.util.Collection;
import java.util.HashMap;
@ -40,7 +37,7 @@ import java.util.Set;
* and if the result is false, you have input error problems, otherwise you
* may have cluster issues. You can iterate over the causes, rows and last
* known server addresses via {@link #getNumExceptions()} and
* {@link #getCause(int)}, {@link #getRow(int)} and {@link #getAddress(int)}.
* {@link #getCause(int)}, {@link #getRow(int)} and {@link #getHostnamePort(int)}.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@ -79,10 +76,6 @@ extends RetriesExhaustedException {
return actions.get(i);
}
public HServerAddress getAddress(int i) {
return new HServerAddress(Addressing.createInetSocketAddressFromHostAndPortStr(getHostnamePort(i)));
}
public String getHostnamePort(final int i) {
return this.hostnameAndPort.get(i);
}

View File

@ -187,7 +187,7 @@ public class ScannerCallable extends ServerCallable<Result[]> {
connection.relocateRegion(tableName, scan.getStartRow());
LOG.info("Scanner=" + scannerId
+ " expired, current region location is " + location.toString()
+ " ip:" + location.getServerAddress().getBindAddress());
+ " ip:" + location.getHostnamePort());
} catch (Throwable t) {
LOG.info("Failed to relocate region", t);
}
@ -275,7 +275,7 @@ public class ScannerCallable extends ServerCallable<Result[]> {
if (logScannerActivity) {
LOG.info("Open scanner=" + id + " for scan=" + scan.toString()
+ " on region " + this.location.toString() + " ip:"
+ this.location.getServerAddress().getBindAddress());
+ this.location.getHostnamePort());
}
return id;
} catch (ServiceException se) {

View File

@ -46,8 +46,6 @@ import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Action;
@ -185,8 +183,8 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur
addToMap(HRegion[].class, code++);
addToMap(HRegionInfo.class, code++);
addToMap(HRegionInfo[].class, code++);
addToMap(HServerAddress.class, code++);
addToMap(HServerInfo.class, code++);
code++; // Removed
code++; // Removed
addToMap(HTableDescriptor.class, code++);
addToMap(MapWritable.class, code++);
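The two bare code++ lines matter: HbaseObjectWritable class codes are positional integers on the wire, so a removed class must keep consuming its slot or every later code would shift and old peers would mis-decode. A contrived sketch of the idea (map and names hypothetical):

import java.util.HashMap;
import java.util.Map;

public class StableClassCodes {
  public static void main(String[] args) {
    Map<Integer, String> codeToClass = new HashMap<Integer, String>();
    int code = 24;
    codeToClass.put(code++, "HRegionInfo");      // stays 24
    codeToClass.put(code++, "HRegionInfo[]");    // stays 25
    code++; // 26: HServerAddress removed, slot stays reserved
    code++; // 27: HServerInfo removed, slot stays reserved
    codeToClass.put(code++, "HTableDescriptor"); // still 28, as the test below asserts
    System.out.println(codeToClass);
  }
}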

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.mapred;
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.jobcontrol.Job;
/**
* Utility for {@link TableMap} and {@link TableReduce}
@ -157,10 +158,9 @@ public class TableMapReduceUtil {
MutationSerialization.class.getName(), ResultSerialization.class.getName());
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
HTable outputTable = new HTable(HBaseConfiguration.create(job), table);
int regions = outputTable.getRegionsInfo().size();
int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(outputTable.getRegionsInfo().size());
job.setNumReduceTasks(regions);
}
} else if (partitioner != null) {
job.setPartitionerClass(partitioner);
@ -192,8 +192,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumReduceTasks(String table, JobConf job)
throws IOException {
HTable outputTable = new HTable(HBaseConfiguration.create(job), table);
int regions = outputTable.getRegionsInfo().size();
int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
@ -208,8 +207,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumMapTasks(String table, JobConf job)
throws IOException {
HTable outputTable = new HTable(HBaseConfiguration.create(job), table);
int regions = outputTable.getRegionsInfo().size();
int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumMapTasks() > regions)
job.setNumMapTasks(regions);
}
@ -224,9 +222,7 @@ public class TableMapReduceUtil {
*/
public static void setNumReduceTasks(String table, JobConf job)
throws IOException {
HTable outputTable = new HTable(HBaseConfiguration.create(job), table);
int regions = outputTable.getRegionsInfo().size();
job.setNumReduceTasks(regions);
job.setNumReduceTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
}
/**
@ -239,9 +235,7 @@ public class TableMapReduceUtil {
*/
public static void setNumMapTasks(String table, JobConf job)
throws IOException {
HTable outputTable = new HTable(HBaseConfiguration.create(job), table);
int regions = outputTable.getRegionsInfo().size();
job.setNumMapTasks(regions);
job.setNumMapTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
}
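All four methods above now share the same pattern; pulled out, it looks like this hedged sketch (helper name hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.mapred.JobConf;

public class ReduceTaskCap {
  // Cap reducers at the table's region count without opening an HTable,
  // mirroring the patched limitNumReduceTasks above.
  static void capReducers(String table, JobConf job) throws IOException {
    int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  }
}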
/**

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.mapreduce;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@ -33,7 +34,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
@ -160,8 +160,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
if (keys == null || keys.getFirst() == null ||
keys.getFirst().length == 0) {
HRegionLocation regLoc = table.getRegionLocation(
HConstants.EMPTY_BYTE_ARRAY, false);
HRegionLocation regLoc = table.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false);
if (null == regLoc) {
throw new IOException("Expecting at least one region.");
}
@ -177,24 +176,26 @@ extends InputFormat<ImmutableBytesWritable, Result> {
if ( !includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
continue;
}
HServerAddress regionServerAddress =
table.getRegionLocation(keys.getFirst()[i]).getServerAddress();
InetAddress regionAddress =
regionServerAddress.getInetSocketAddress().getAddress();
HRegionLocation location = table.getRegionLocation(keys.getFirst()[i], false);
// The below InetSocketAddress creation does a name resolution.
InetSocketAddress isa = new InetSocketAddress(location.getHostname(), location.getPort());
if (isa.isUnresolved()) {
LOG.warn("Failed resolve " + isa);
}
InetAddress regionAddress = isa.getAddress();
String regionLocation;
try {
regionLocation = reverseDNS(regionAddress);
} catch (NamingException e) {
LOG.error("Cannot resolve the host name for " + regionAddress +
" because of " + e);
regionLocation = regionServerAddress.getHostname();
LOG.error("Cannot resolve the host name for " + regionAddress + " because of " + e);
regionLocation = location.getHostname();
}
byte[] startRow = scan.getStartRow();
byte[] stopRow = scan.getStopRow();
// determine if the given start and stop keys fall into the region
byte[] startRow = scan.getStartRow();
byte[] stopRow = scan.getStopRow();
// determine if the given start and stop keys fall into the region
if ((startRow.length == 0 || keys.getSecond()[i].length == 0 ||
Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) &&
Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) &&
(stopRow.length == 0 ||
Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) {
byte[] splitStart = startRow.length == 0 ||

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.hbase.mapreduce;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Enumeration;
@ -35,7 +35,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@ -374,10 +374,9 @@ public class TableMapReduceUtil {
job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
HTable outputTable = new HTable(conf, table);
int regions = outputTable.getRegionsInfo().size();
int regions = MetaReader.getRegionCount(conf, table);
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(outputTable.getRegionsInfo().size());
job.setNumReduceTasks(regions);
}
} else if (partitioner != null) {
job.setPartitionerClass(partitioner);
@ -400,8 +399,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumReduceTasks(String table, Job job)
throws IOException {
HTable outputTable = new HTable(job.getConfiguration(), table);
int regions = outputTable.getRegionsInfo().size();
int regions = MetaReader.getRegionCount(job.getConfiguration(), table);
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
@ -416,9 +414,7 @@ public class TableMapReduceUtil {
*/
public static void setNumReduceTasks(String table, Job job)
throws IOException {
HTable outputTable = new HTable(job.getConfiguration(), table);
int regions = outputTable.getRegionsInfo().size();
job.setNumReduceTasks(regions);
job.setNumReduceTasks(MetaReader.getRegionCount(job.getConfiguration(), table));
}
/**

View File

@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.NoServerForRegionException;
@ -370,20 +371,18 @@ public class RegionSplitter {
Preconditions.checkArgument(!admin.tableExists(tableName),
"Table already exists: " + tableName);
admin.createTable(desc, splitAlgo.split(splitCount));
admin.close();
LOG.debug("Table created! Waiting for regions to show online in META...");
if (!conf.getBoolean("split.verify", true)) {
// NOTE: createTable is synchronous on the table, but not on the regions
HTable table = new HTable(conf, tableName);
int onlineRegions = 0;
while (onlineRegions < splitCount) {
onlineRegions = table.getRegionsInfo().size();
onlineRegions = MetaReader.getRegionCount(conf, tableName);
LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
if (onlineRegions < splitCount) {
Thread.sleep(10 * 1000); // sleep
}
}
table.close();
}
LOG.debug("Finished creating table with " + splitCount + " regions");

View File

@ -1,86 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.util.Writables;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Tests for {@link HServerAddress}
*/
@Category(SmallTests.class)
public class TestHServerAddress {
@Test
public void testHashCode() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerAddress hsa2 = new HServerAddress("localhost", 1234);
assertEquals(hsa1.hashCode(), hsa2.hashCode());
HServerAddress hsa3 = new HServerAddress("localhost", 1235);
assertNotSame(hsa1.hashCode(), hsa3.hashCode());
}
@Test
public void testHServerAddress() {
new HServerAddress();
}
@Test
public void testHServerAddressInetSocketAddress() {
HServerAddress hsa1 =
new HServerAddress(new InetSocketAddress("localhost", 1234));
System.out.println(hsa1.toString());
}
@Test
public void testHServerAddressString() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerAddress hsa2 =
new HServerAddress(new InetSocketAddress("localhost", 1234));
assertTrue(hsa1.equals(hsa2));
}
@Test
public void testHServerAddressHServerAddress() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerAddress hsa2 = new HServerAddress(hsa1);
assertEquals(hsa1, hsa2);
}
@Test
public void testReadFields() throws IOException {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerAddress hsa2 = new HServerAddress("localhost", 1235);
byte [] bytes = Writables.getBytes(hsa1);
HServerAddress deserialized =
(HServerAddress)Writables.getWritable(bytes, new HServerAddress());
assertEquals(hsa1, deserialized);
bytes = Writables.getBytes(hsa2);
deserialized =
(HServerAddress)Writables.getWritable(bytes, new HServerAddress());
assertNotSame(hsa1, deserialized);
}
}

View File

@ -1,101 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category(SmallTests.class)
public class TestHServerInfo {
@Test
public void testHashCodeAndEquals() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
HServerInfo hsi2 = new HServerInfo(hsa1, 1L, 5678);
HServerInfo hsi3 = new HServerInfo(hsa1, 2L, 5678);
HServerInfo hsi4 = new HServerInfo(hsa1, 1L, 5677);
HServerAddress hsa2 = new HServerAddress("localhost", 1235);
HServerInfo hsi5 = new HServerInfo(hsa2, 1L, 5678);
assertEquals(hsi1.hashCode(), hsi2.hashCode());
assertTrue(hsi1.equals(hsi2));
assertNotSame(hsi1.hashCode(), hsi3.hashCode());
assertFalse(hsi1.equals(hsi3));
assertNotSame(hsi1.hashCode(), hsi4.hashCode());
assertFalse(hsi1.equals(hsi4));
assertNotSame(hsi1.hashCode(), hsi5.hashCode());
assertFalse(hsi1.equals(hsi5));
}
@Test
public void testHServerInfoHServerInfo() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
HServerInfo hsi2 = new HServerInfo(hsi1);
assertEquals(hsi1, hsi2);
}
@Test
public void testGetServerAddress() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
assertEquals(hsi1.getServerAddress(), hsa1);
}
@Test
public void testToString() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
System.out.println(hsi1.toString());
}
@Test
public void testReadFields() throws IOException {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
HServerAddress hsa2 = new HServerAddress("localhost", 1235);
HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678);
byte [] bytes = Writables.getBytes(hsi1);
HServerInfo deserialized =
(HServerInfo)Writables.getWritable(bytes, new HServerInfo());
assertEquals(hsi1, deserialized);
bytes = Writables.getBytes(hsi2);
deserialized = (HServerInfo)Writables.getWritable(bytes, new HServerInfo());
assertNotSame(hsa1, deserialized);
}
@Test
public void testCompareTo() {
HServerAddress hsa1 = new HServerAddress("localhost", 1234);
HServerInfo hsi1 = new HServerInfo(hsa1, 1L, 5678);
HServerAddress hsa2 = new HServerAddress("localhost", 1235);
HServerInfo hsi2 = new HServerInfo(hsa2, 1L, 5678);
assertTrue(hsi1.compareTo(hsi1) == 0);
assertTrue(hsi2.compareTo(hsi2) == 0);
int compare1 = hsi1.compareTo(hsi2);
int compare2 = hsi2.compareTo(hsi1);
assertTrue((compare1 > 0)? compare2 < 0: compare2 > 0);
}
}

View File

@ -343,7 +343,7 @@ public class TestAdmin {
public void testHColumnValidName() {
boolean exceptionThrown = false;
try {
HColumnDescriptor fam1 = new HColumnDescriptor("\\test\\abc");
new HColumnDescriptor("\\test\\abc");
} catch(IllegalArgumentException iae) {
exceptionThrown = true;
assertTrue(exceptionThrown);
@ -526,10 +526,10 @@ public class TestAdmin {
protected void verifyRoundRobinDistribution(HTable ht, int expectedRegions) throws IOException {
int numRS = ht.getConnection().getCurrentNrHRS();
Map<HRegionInfo,HServerAddress> regions = ht.getRegionsInfo();
Map<HServerAddress, List<HRegionInfo>> server2Regions = new HashMap<HServerAddress, List<HRegionInfo>>();
for (Map.Entry<HRegionInfo,HServerAddress> entry : regions.entrySet()) {
HServerAddress server = entry.getValue();
Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
Map<ServerName, List<HRegionInfo>> server2Regions = new HashMap<ServerName, List<HRegionInfo>>();
for (Map.Entry<HRegionInfo, ServerName> entry : regions.entrySet()) {
ServerName server = entry.getValue();
List<HRegionInfo> regs = server2Regions.get(server);
if (regs == null) {
regs = new ArrayList<HRegionInfo>();
@ -568,7 +568,7 @@ public class TestAdmin {
admin.createTable(desc, splitKeys);
HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
Map<HRegionInfo,HServerAddress> regions = ht.getRegionsInfo();
Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
assertEquals("Tried to create " + expectedRegions + " regions " +
"but only found " + regions.size(),
expectedRegions, regions.size());
@ -628,7 +628,7 @@ public class TestAdmin {
admin.createTable(desc, startKey, endKey, expectedRegions);
HTable ht2 = new HTable(TEST_UTIL.getConfiguration(), TABLE_2);
regions = ht2.getRegionsInfo();
regions = ht2.getRegionLocations();
assertEquals("Tried to create " + expectedRegions + " regions " +
"but only found " + regions.size(),
expectedRegions, regions.size());
@ -685,7 +685,7 @@ public class TestAdmin {
HTable ht3 = new HTable(TEST_UTIL.getConfiguration(), TABLE_3);
regions = ht3.getRegionsInfo();
regions = ht3.getRegionLocations();
assertEquals("Tried to create " + expectedRegions + " regions " +
"but only found " + regions.size(),
expectedRegions, regions.size());
@ -792,7 +792,7 @@ public class TestAdmin {
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc, splitKeys);
HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
Map<HRegionInfo, HServerAddress> regions = ht.getRegionsInfo();
Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
ht.close();
assertEquals("Tried to create " + expectedRegions + " regions "
+ "but only found " + regions.size(), expectedRegions, regions.size());
@ -819,6 +819,7 @@ public class TestAdmin {
regioncount++;
serverMap.put(server, regioncount);
}
metaTable.close();
List<Map.Entry<String, Integer>> entryList = new ArrayList<Map.Entry<String, Integer>>(
serverMap.entrySet());
Collections.sort(entryList, new Comparator<Map.Entry<String, Integer>>() {
@ -893,7 +894,7 @@ public class TestAdmin {
}
// get the initial layout (should just be one region)
Map<HRegionInfo,HServerAddress> m = table.getRegionsInfo();
Map<HRegionInfo, ServerName> m = table.getRegionLocations();
System.out.println("Initial regions (" + m.size() + "): " + m);
assertTrue(m.size() == 1);
@ -923,9 +924,9 @@ public class TestAdmin {
continue;
}
// check again table = new HTable(conf, tableName);
Map<HRegionInfo, HServerAddress> regions = null;
Map<HRegionInfo, ServerName> regions = null;
try {
regions = table.getRegionsInfo();
regions = table.getRegionLocations();
} catch (IOException e) {
e.printStackTrace();
}
@ -953,9 +954,9 @@ public class TestAdmin {
scanner.close();
assertEquals(rowCount, rows);
Map<HRegionInfo, HServerAddress> regions = null;
Map<HRegionInfo, ServerName> regions = null;
try {
regions = table.getRegionsInfo();
regions = table.getRegionLocations();
} catch (IOException e) {
e.printStackTrace();
}
@ -991,7 +992,7 @@ public class TestAdmin {
* @throws IOException
*/
@Test (expected=IllegalArgumentException.class)
public void testEmptyHHTableDescriptor() throws IOException {
public void testEmptyHTableDescriptor() throws IOException {
this.admin.createTable(new HTableDescriptor());
}
@ -1223,8 +1224,7 @@ public class TestAdmin {
*/
@Test (expected=TableNotFoundException.class)
public void testTableNotFoundExceptionWithoutAnyTables() throws IOException {
new HTable(TEST_UTIL.getConfiguration(),
"testTableNotFoundExceptionWithoutAnyTables");
new HTable(TEST_UTIL.getConfiguration(),"testTableNotFoundExceptionWithoutAnyTables");
}
@Test
public void testShouldCloseTheRegionBasedOnTheEncodedRegionName()

View File

@ -18,11 +18,15 @@
*/
package org.apache.hadoop.hbase.client;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
@ -44,8 +48,18 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
@ -86,8 +100,6 @@ import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.*;
/**
* Run tests that use the HBase clients; {@link HTable} and {@link HTablePool}.
* Sets up the HBase mini cluster once at start and runs through all client tests.
@ -292,77 +304,6 @@ public class TestFromClientSide {
}
}
/**
* HBASE-2468 use case 1 and 2: region info de/serialization
*/
@Test
public void testRegionCacheDeSerialization() throws Exception {
// 1. test serialization.
LOG.info("Starting testRegionCacheDeSerialization");
final byte[] TABLENAME = Bytes.toBytes("testCachePrewarm2");
final byte[] FAMILY = Bytes.toBytes("family");
Configuration conf = TEST_UTIL.getConfiguration();
TEST_UTIL.createTable(TABLENAME, FAMILY);
// Set up test table:
// Create table:
HTable table = new HTable(conf, TABLENAME);
// Create multiple regions for this table
TEST_UTIL.createMultiRegions(table, FAMILY);
Scan s = new Scan();
ResultScanner scanner = table.getScanner(s);
while (scanner.next() != null) continue;
Path tempPath = new Path(TEST_UTIL.getDataTestDir(), "regions.dat");
final String tempFileName = tempPath.toString();
FileOutputStream fos = new FileOutputStream(tempFileName);
DataOutputStream dos = new DataOutputStream(fos);
// serialize the region info and output to a local file.
table.serializeRegionInfo(dos);
dos.flush();
dos.close();
// read a local file and deserialize the region info from it.
FileInputStream fis = new FileInputStream(tempFileName);
DataInputStream dis = new DataInputStream(fis);
Map<HRegionInfo, HServerAddress> deserRegions =
table.deserializeRegionInfo(dis);
dis.close();
// regions obtained from meta scanner.
Map<HRegionInfo, HServerAddress> loadedRegions =
table.getRegionsInfo();
// set the deserialized regions to the global cache.
table.getConnection().clearRegionCache();
table.getConnection().prewarmRegionCache(table.getTableName(),
deserRegions);
// verify whether the 2 maps are identical or not.
assertEquals("Number of cached region is incorrect",
HConnectionManager.getCachedRegionCount(conf, TABLENAME),
loadedRegions.size());
// verify each region is prefetched or not.
for (Map.Entry<HRegionInfo, HServerAddress> e: loadedRegions.entrySet()) {
HRegionInfo hri = e.getKey();
assertTrue(HConnectionManager.isRegionCached(conf,
hri.getTableName(), hri.getStartKey()));
}
// delete the temp file
File f = new java.io.File(tempFileName);
f.delete();
LOG.info("Finishing testRegionCacheDeSerialization");
}
/**
* HBASE-2468 use case 3:
*/
@ -600,7 +541,7 @@ public class TestFromClientSide {
int rowCount = TEST_UTIL.loadTable(t, FAMILY);
assertRowCount(t, rowCount);
// Split the table. Should split on a reasonable key; 'lqj'
Map<HRegionInfo, HServerAddress> regions = splitTable(t);
Map<HRegionInfo, ServerName> regions = splitTable(t);
assertRowCount(t, rowCount);
// Get end key of first region.
byte [] endKey = regions.keySet().iterator().next().getEndKey();
@ -702,12 +643,13 @@ public class TestFromClientSide {
* @return Map of regions to servers.
* @throws IOException
*/
private Map<HRegionInfo, HServerAddress> splitTable(final HTable t)
private Map<HRegionInfo, ServerName> splitTable(final HTable t)
throws IOException, InterruptedException {
// Split this table in two.
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
admin.split(t.getTableName());
Map<HRegionInfo, HServerAddress> regions = waitOnSplit(t);
admin.close();
Map<HRegionInfo, ServerName> regions = waitOnSplit(t);
assertTrue(regions.size() > 1);
return regions;
}
@ -718,9 +660,9 @@ public class TestFromClientSide {
* @param t
* @return Map of table regions; caller needs to check table actually split.
*/
private Map<HRegionInfo, HServerAddress> waitOnSplit(final HTable t)
private Map<HRegionInfo, ServerName> waitOnSplit(final HTable t)
throws IOException {
Map<HRegionInfo, HServerAddress> regions = t.getRegionsInfo();
Map<HRegionInfo, ServerName> regions = t.getRegionLocations();
int originalCount = regions.size();
for (int i = 0; i < TEST_UTIL.getConfiguration().getInt("hbase.test.retries", 30); i++) {
Thread.currentThread();
@ -729,7 +671,7 @@ public class TestFromClientSide {
} catch (InterruptedException e) {
e.printStackTrace();
}
regions = t.getRegionsInfo();
regions = t.getRegionLocations();
if (regions.size() > originalCount) break;
}
return regions;
@ -4902,11 +4844,8 @@ public class TestFromClientSide {
HRegionInfo regionInfo = regionsMap.keySet().iterator().next();
ServerName addrBefore = regionsMap.get(regionInfo);
// Verify region location before move.
HServerAddress addrCache =
table.getRegionLocation(regionInfo.getStartKey(), false).getServerAddress();
HServerAddress addrNoCache =
table.getRegionLocation(regionInfo.getStartKey(),
true).getServerAddress();
HRegionLocation addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
HRegionLocation addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
assertEquals(addrBefore.getPort(), addrCache.getPort());
assertEquals(addrBefore.getPort(), addrNoCache.getPort());
@ -4927,11 +4866,8 @@ public class TestFromClientSide {
}
// Verify the region was moved.
addrCache =
table.getRegionLocation(regionInfo.getStartKey(), false).getServerAddress();
addrNoCache =
table.getRegionLocation(regionInfo.getStartKey(),
true).getServerAddress();
addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
assertNotNull(addrAfter);
assertTrue(addrAfter.getPort() != addrCache.getPort());
assertEquals(addrAfter.getPort(), addrNoCache.getPort());

View File

@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
@ -486,8 +484,7 @@ public class TestHbaseObjectWritable extends TestCase {
assertEquals(23,HbaseObjectWritable.getClassCode(HRegion[].class).intValue());
assertEquals(24,HbaseObjectWritable.getClassCode(HRegionInfo.class).intValue());
assertEquals(25,HbaseObjectWritable.getClassCode(HRegionInfo[].class).intValue());
assertEquals(26,HbaseObjectWritable.getClassCode(HServerAddress.class).intValue());
assertEquals(27,HbaseObjectWritable.getClassCode(HServerInfo.class).intValue());
// Intentional hole... these objects have been removed.
assertEquals(28,HbaseObjectWritable.getClassCode(HTableDescriptor.class).intValue());
assertEquals(29,HbaseObjectWritable.getClassCode(MapWritable.class).intValue());

View File

@ -358,11 +358,11 @@ public class TestHFileOutputFormat {
util = new HBaseTestingUtility();
Configuration conf = util.getConfiguration();
byte[][] startKeys = generateRandomStartKeys(5);
HBaseAdmin admin = null;
try {
util.startMiniCluster();
Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
HBaseAdmin admin = new HBaseAdmin(conf);
admin = new HBaseAdmin(conf);
HTable table = util.createTable(TABLE_NAME, FAMILIES);
assertEquals("Should start with empty table",
0, util.countRows(table));
@ -402,7 +402,7 @@ public class TestHFileOutputFormat {
util.createMultiRegions(
util.getConfiguration(), table, FAMILIES[0], newStartKeys);
admin.enableTable(table.getTableName());
while (table.getRegionsInfo().size() != 15 ||
while (table.getRegionLocations().size() != 15 ||
!admin.isTableAvailable(table.getTableName())) {
Thread.sleep(200);
LOG.info("Waiting for new region assignment to happen");
@ -440,6 +440,7 @@ public class TestHFileOutputFormat {
assertEquals("Data should remain after reopening of regions",
tableDigestBefore, util.checksumRows(table));
} finally {
if (admin != null) admin.close();
util.shutdownMiniMapReduceCluster();
util.shutdownMiniCluster();
}
@ -456,8 +457,7 @@ public class TestHFileOutputFormat {
Assert.assertFalse( util.getTestFileSystem().exists(outDir)) ;
assertEquals(table.getRegionsInfo().size(),
job.getNumReduceTasks());
assertEquals(table.getRegionLocations().size(), job.getNumReduceTasks());
assertTrue(job.waitForCompletion(true));
}

View File

@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
@ -419,7 +418,7 @@ public class TestEndToEndSplitTransaction {
verifyStartEndKeys(keys);
// HTable.getRegionLocations()
Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
verifyTableRegions(regions.keySet());
} finally {
IOUtils.closeQuietly(table);

View File

@ -57,7 +57,7 @@ public class TestTableResource {
private static String TABLE = "TestTableResource";
private static String COLUMN_FAMILY = "test";
private static String COLUMN = COLUMN_FAMILY + ":qualifier";
private static Map<HRegionInfo,HServerAddress> regionMap;
private static Map<HRegionInfo, ServerName> regionMap;
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static final HBaseRESTTestingUtility REST_TEST_UTIL =
@ -101,7 +101,7 @@ public class TestTableResource {
}
table.flushCommits();
// get the initial layout (should just be one region)
Map<HRegionInfo,HServerAddress> m = table.getRegionsInfo();
Map<HRegionInfo, ServerName> m = table.getRegionLocations();
assertEquals(m.size(), 1);
// tell the master to split the table
admin.split(TABLE);
@ -115,7 +115,7 @@ public class TestTableResource {
LOG.warn(StringUtils.stringifyException(e));
}
// check again
m = table.getRegionsInfo();
m = table.getRegionLocations();
}
// should have two regions now
@ -152,7 +152,7 @@ public class TestTableResource {
while (regions.hasNext()) {
TableRegionModel region = regions.next();
boolean found = false;
for (Map.Entry<HRegionInfo,HServerAddress> e: regionMap.entrySet()) {
for (Map.Entry<HRegionInfo, ServerName> e: regionMap.entrySet()) {
HRegionInfo hri = e.getKey();
String hriRegionName = hri.getRegionNameAsString();
String regionName = region.getName();
@ -160,7 +160,7 @@ public class TestTableResource {
found = true;
byte[] startKey = hri.getStartKey();
byte[] endKey = hri.getEndKey();
InetSocketAddress sa = e.getValue().getInetSocketAddress();
InetSocketAddress sa = new InetSocketAddress(e.getValue().getHostname(), e.getValue().getPort());
String location = sa.getHostName() + ":" +
Integer.valueOf(sa.getPort());
assertEquals(hri.getRegionId(), region.getId());

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.hbase.security.access;
import static org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import static org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -29,17 +27,11 @@ import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.Map;
-import com.google.common.collect.Lists;
-import com.google.protobuf.BlockingRpcChannel;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcChannel;
-import com.google.protobuf.ServiceException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.ServerName;
@@ -54,13 +46,14 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -72,6 +65,11 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import com.google.common.collect.Lists;
+import com.google.protobuf.BlockingRpcChannel;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ServiceException;
 /**
  * Performs authorization checks for common operations, according to different
  * levels of authorized users.
@@ -385,8 +383,8 @@ public class TestAccessController {
 @Test
 public void testMove() throws Exception {
 HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE);
-Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
-final Map.Entry<HRegionInfo, HServerAddress> firstRegion = regions.entrySet().iterator().next();
+Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
+final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
 final ServerName server = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
 PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
 public Object run() throws Exception {
@@ -403,8 +401,8 @@ public class TestAccessController {
 @Test
 public void testAssign() throws Exception {
 HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE);
-Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
-final Map.Entry<HRegionInfo, HServerAddress> firstRegion = regions.entrySet().iterator().next();
+Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
+final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
 PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
 public Object run() throws Exception {
@@ -421,8 +419,8 @@ public class TestAccessController {
 @Test
 public void testUnassign() throws Exception {
 HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE);
-Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
-final Map.Entry<HRegionInfo, HServerAddress> firstRegion = regions.entrySet().iterator().next();
+Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
+final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
 PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
 public Object run() throws Exception {
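
The three tests above share one setup pattern: pick a region from the ServerName-keyed map, then drive the master with it. A sketch under those assumptions (table, admin, and server are names from the surrounding tests, not part of this patch):

import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;

// Pick any region of the table and ask the master to move it to a chosen server.
Map<HRegionInfo, ServerName> regions = table.getRegionLocations();
Map.Entry<HRegionInfo, ServerName> first = regions.entrySet().iterator().next();
admin.move(first.getKey().getEncodedNameAsBytes(),
    Bytes.toBytes(server.getServerName()));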

View File

@@ -18,12 +18,13 @@
  */
 package org.apache.hadoop.hbase.util;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
@@ -31,9 +32,11 @@ import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit;
 import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
 import org.apache.hadoop.hbase.util.RegionSplitter.UniformSplit;
@@ -296,11 +299,9 @@ public class TestRegionSplitter {
 final Configuration conf = UTIL.getConfiguration();
 final int numRegions = expectedBounds.size()-1;
 final HTable hTable = new HTable(conf, tableName.getBytes());
-final Map<HRegionInfo, HServerAddress> regionInfoMap =
-hTable.getRegionsInfo();
+final Map<HRegionInfo, ServerName> regionInfoMap = hTable.getRegionLocations();
 assertEquals(numRegions, regionInfoMap.size());
-for (Map.Entry<HRegionInfo, HServerAddress> entry:
-regionInfoMap.entrySet()) {
+for (Map.Entry<HRegionInfo, ServerName> entry: regionInfoMap.entrySet()) {
 final HRegionInfo regionInfo = entry.getKey();
 byte[] regionStart = regionInfo.getStartKey();
 byte[] regionEnd = regionInfo.getEndKey();

View File

@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -169,10 +169,10 @@ public class OfflineMetaRebuildTestCore {
 HTableDescriptor htd = tbl.getTableDescriptor();
 dumpMeta(htd);
-Map<HRegionInfo, HServerAddress> hris = tbl.getRegionsInfo();
-for (Entry<HRegionInfo, HServerAddress> e : hris.entrySet()) {
+Map<HRegionInfo, ServerName> hris = tbl.getRegionLocations();
+for (Entry<HRegionInfo, ServerName> e : hris.entrySet()) {
 HRegionInfo hri = e.getKey();
-HServerAddress hsa = e.getValue();
+ServerName hsa = e.getValue();
 if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
 && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
@@ -190,6 +190,7 @@ public class OfflineMetaRebuildTestCore {
 HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
 Delete delete = new Delete(deleteRow);
 meta.delete(delete);
+meta.close();
 }
 LOG.info(hri.toString() + hsa.toString());
 }
@@ -257,6 +258,7 @@ public class OfflineMetaRebuildTestCore {
 Result rt : rst) {
 count++;
 }
+t.close();
 return count;
 }
@@ -274,6 +276,7 @@ public class OfflineMetaRebuildTestCore {
 LOG.info(Bytes.toString(res.getRow()));
 count++;
 }
+meta.close();
 return count;
 }
 }
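
Alongside the type migration, the hunks above add explicit close() calls on short-lived HTable handles. A sketch of that pattern, assuming a Configuration named conf; the try/finally is a small hardening over the inline close() the test code uses:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;

HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
try {
  // ... scan or mutate the meta table here ...
} finally {
  meta.close(); // release the handle so it does not leak across tests
}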