HBASE-5688 Convert zk root-region-server znode content to pb

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1308560 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-04-02 21:00:47 +00:00
parent 3e23493543
commit 4e99cb4923
12 changed files with 753 additions and 111 deletions

View File

@ -1,74 +0,0 @@
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.catalog;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
/**
* Makes changes to the location of <code>-ROOT-</code> in ZooKeeper.
*/
@InterfaceAudience.Private
public class RootLocationEditor {
private static final Log LOG = LogFactory.getLog(RootLocationEditor.class);
/**
* Deletes the location of <code>-ROOT-</code> in ZooKeeper.
* @param zookeeper zookeeper reference
* @throws KeeperException unexpected zookeeper exception
*/
public static void deleteRootLocation(ZooKeeperWatcher zookeeper)
throws KeeperException {
LOG.info("Unsetting ROOT region location in ZooKeeper");
try {
// Just delete the node. Don't need any watches, only we will create it.
ZKUtil.deleteNode(zookeeper, zookeeper.rootServerZNode);
} catch(KeeperException.NoNodeException nne) {
// Has already been deleted
}
}
/**
* Sets the location of <code>-ROOT-</code> in ZooKeeper to the
* specified server address.
* @param zookeeper zookeeper reference
* @param location The server hosting <code>-ROOT-</code>
* @throws KeeperException unexpected zookeeper exception
*/
public static void setRootLocation(ZooKeeperWatcher zookeeper,
final ServerName location)
throws KeeperException {
LOG.info("Setting ROOT region location in ZooKeeper as " + location);
try {
ZKUtil.createAndWatch(zookeeper, zookeeper.rootServerZNode,
Bytes.toBytes(location.toString()));
} catch(KeeperException.NodeExistsException nee) {
LOG.debug("ROOT region location already existed, updated location");
ZKUtil.setData(zookeeper, zookeeper.rootServerZNode,
Bytes.toBytes(location.toString()));
}
}
}

View File

@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@ -920,10 +919,8 @@ public class HConnectionManager {
     try {
       LOG.debug("Looking up root region location in ZK," +
         " connection=" + this);
-      ServerName servername = RootRegionTracker.dataToServerName(
-        ZKUtil.blockUntilAvailable(
-          zkw, zkw.rootServerZNode, this.rpcTimeout)
-        );
+      ServerName servername =
+        RootRegionTracker.blockUntilAvailable(zkw, this.rpcTimeout);
       LOG.debug("Looked up root region location, connection=" + this +
         "; serverName=" + ((servername == null) ? "null" : servername));

View File

@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.catalog.RootLocationEditor;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKTable;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@ -2151,7 +2151,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @throws KeeperException
    */
   public void assignRoot() throws KeeperException {
-    RootLocationEditor.deleteRootLocation(this.master.getZooKeeper());
+    RootRegionTracker.deleteRootLocation(this.master.getZooKeeper());
     assign(HRegionInfo.ROOT_REGIONINFO, true);
   }

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.protobuf;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Protobufs utility.
*/
public class ProtobufUtil {
/**
* Magic we put ahead of a serialized protobuf message.
* For example, all znode content is protobuf messages with the below magic
* for preamble.
*/
static final byte [] PB_MAGIC = new byte [] {'P', 'B', 'U', 'F'};
/**
* Prepend the passed bytes with four bytes of magic, {@link #PB_MAGIC}, to flag what
* follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc.
* @param bytes Bytes to decorate
* @return The passed <code>bytes</code> with magic prepended (creates a new
* byte array that is <code>bytes.length</code> plus {@link #PB_MAGIC}.length).
*/
public static byte [] prependPBMagic(final byte [] bytes) {
return Bytes.add(PB_MAGIC, bytes);
}
/**
* @param bytes Bytes to check.
* @return True if passed <code>bytes</code> has {@link #PB_MAGIC} for a prefix.
*/
public static boolean isPBMagicPrefix(final byte [] bytes) {
if (bytes == null || bytes.length <= PB_MAGIC.length) return false;
return Bytes.compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, 0, PB_MAGIC.length) == 0;
}
/**
* @return Length of {@link #PB_MAGIC}
*/
public static int lengthOfPBMagic() {
return PB_MAGIC.length;
}
}
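
For orientation (not part of this commit), a minimal sketch of the preamble convention the class above establishes: writers prepend the four 'PBUF' bytes to serialized protobuf content before storing it in a znode, and readers check for that prefix before parsing. The class name and payload bytes below are hypothetical stand-ins.

import java.util.Arrays;

public class PBMagicSketch {
  // Same four-byte preamble as ProtobufUtil.PB_MAGIC above.
  static final byte[] PB_MAGIC = new byte[] {'P', 'B', 'U', 'F'};

  public static void main(String[] args) {
    byte[] payload = new byte[] {0x0a, 0x03, 'f', 'o', 'o'}; // stand-in for pb bytes

    // Writer side: prepend the magic (what prependPBMagic does via Bytes.add).
    byte[] znodeContent = new byte[PB_MAGIC.length + payload.length];
    System.arraycopy(PB_MAGIC, 0, znodeContent, 0, PB_MAGIC.length);
    System.arraycopy(payload, 0, znodeContent, PB_MAGIC.length, payload.length);

    // Reader side: test the prefix (what isPBMagicPrefix does), then skip
    // lengthOfPBMagic() bytes and hand the remainder to the pb parser.
    boolean isPB = znodeContent.length > PB_MAGIC.length &&
      Arrays.equals(Arrays.copyOfRange(znodeContent, 0, PB_MAGIC.length), PB_MAGIC);
    byte[] message = isPB
      ? Arrays.copyOfRange(znodeContent, PB_MAGIC.length, znodeContent.length)
      : znodeContent; // legacy, pre-pb content
    System.out.println("pb=" + isPB + ", payload bytes=" + message.length);
  }
}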

View File

@ -0,0 +1,527 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: ZooKeeper.proto
package org.apache.hadoop.hbase.protobuf.generated;
public final class ZooKeeperProtos {
private ZooKeeperProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface RootRegionServerOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .ServerName server = 2;
boolean hasServer();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
}
public static final class RootRegionServer extends
com.google.protobuf.GeneratedMessage
implements RootRegionServerOrBuilder {
// Use RootRegionServer.newBuilder() to construct.
private RootRegionServer(Builder builder) {
super(builder);
}
private RootRegionServer(boolean noInit) {}
private static final RootRegionServer defaultInstance;
public static RootRegionServer getDefaultInstance() {
return defaultInstance;
}
public RootRegionServer getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
}
private int bitField0_;
// required .ServerName server = 2;
public static final int SERVER_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_;
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
return server_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
return server_;
}
private void initFields() {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasServer()) {
memoizedIsInitialized = 0;
return false;
}
if (!getServer().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(2, server_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, server_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) obj;
boolean result = true;
result = result && (hasServer() == other.hasServer());
if (hasServer()) {
result = result && getServer()
.equals(other.getServer());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasServer()) {
hash = (37 * hash) + SERVER_FIELD_NUMBER;
hash = (53 * hash) + getServer().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServerOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getServerFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
} else {
serverBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDescriptor();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (serverBuilder_ == null) {
result.server_ = server_;
} else {
result.server_ = serverBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance()) return this;
if (other.hasServer()) {
mergeServer(other.getServer());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasServer()) {
return false;
}
if (!getServer().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
if (hasServer()) {
subBuilder.mergeFrom(getServer());
}
input.readMessage(subBuilder, extensionRegistry);
setServer(subBuilder.buildPartial());
break;
}
}
}
}
private int bitField0_;
// required .ServerName server = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
if (serverBuilder_ == null) {
return server_;
} else {
return serverBuilder_.getMessage();
}
}
public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
server_ = value;
onChanged();
} else {
serverBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setServer(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (serverBuilder_ == null) {
server_ = builderForValue.build();
onChanged();
} else {
serverBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
server_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial();
} else {
server_ = value;
}
onChanged();
} else {
serverBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearServer() {
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
onChanged();
} else {
serverBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getServerFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
if (serverBuilder_ != null) {
return serverBuilder_.getMessageOrBuilder();
} else {
return server_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getServerFieldBuilder() {
if (serverBuilder_ == null) {
serverBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
server_,
getParentForChildren(),
isClean());
server_ = null;
}
return serverBuilder_;
}
// @@protoc_insertion_point(builder_scope:RootRegionServer)
}
static {
defaultInstance = new RootRegionServer(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RootRegionServer)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RootRegionServer_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RootRegionServer_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" +
"gionServer\022\033\n\006server\030\002 \002(\0132\013.ServerNameB" +
"E\n*org.apache.hadoop.hbase.protobuf.gene" +
"ratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_RootRegionServer_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_RootRegionServer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RootRegionServer_descriptor,
new java.lang.String[] { "Server", },
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.Builder.class);
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}

View File

@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.catalog.RootLocationEditor;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@ -148,6 +147,7 @@ import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
+import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.SchemaChangeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
@ -1634,7 +1634,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     }
     // Update ZK, ROOT or META
     if (r.getRegionInfo().isRootRegion()) {
-      RootLocationEditor.setRootLocation(getZooKeeper(),
+      RootRegionTracker.setRootLocation(getZooKeeper(),
         this.serverNameFromMasterPOV);
     } else if (r.getRegionInfo().isMetaRegion()) {
       MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),

View File

@ -1076,18 +1076,15 @@ public class HBaseFsck {
   }
   private ServerName getRootRegionServerName()
   throws IOException, KeeperException {
     ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    byte[] data;
+    ServerName sn = null;
     try {
-      data = ZKUtil.getData(zkw, zkw.rootServerZNode);
+      sn = RootRegionTracker.getRootRegionLocation(zkw);
     } finally {
       zkw.close();
     }
-    return RootRegionTracker.dataToServerName(data);
+    return sn;
   }
/** /**

View File

@ -1,6 +1,4 @@
 /**
- * Copyright 2010 The Apache Software Foundation
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@ -22,9 +20,15 @@ package org.apache.hadoop.hbase.zookeeper;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.RootLocationEditor;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+
+import com.google.protobuf.InvalidProtocolBufferException;

 /**
  * Tracks the root region server location node in zookeeper.
@ -55,14 +59,26 @@ public class RootRegionTracker extends ZooKeeperNodeTracker {
   }

   /**
-   * Gets the root region location, if available.  Null if not.  Does not block.
-   * @return server name
+   * Gets the root region location, if available.  Does not block.  Sets a watcher.
+   * @return server name or null if we failed to get the data.
    * @throws InterruptedException
    */
   public ServerName getRootRegionLocation() throws InterruptedException {
     return dataToServerName(super.getData(true));
   }

+  /**
+   * Gets the root region location, if available.  Does not block.  Does not set
+   * a watcher (in this regard it differs from {@link #getRootRegionLocation()}).
+   * @param zkw
+   * @return server name or null if we failed to get the data.
+   * @throws KeeperException
+   */
+  public static ServerName getRootRegionLocation(final ZooKeeperWatcher zkw)
+  throws KeeperException {
+    return dataToServerName(ZKUtil.getData(zkw, zkw.rootServerZNode));
+  }
+
   /**
    * Gets the root region location, if available, and waits for up to the
    * specified timeout if not immediately available.
@ -84,16 +100,98 @@ public class RootRegionTracker extends ZooKeeperNodeTracker {
     return dataToServerName(super.blockUntilAvailable(timeout, true));
   }

-  /*
+  /**
+   * Sets the location of <code>-ROOT-</code> in ZooKeeper to the
+   * specified server address.
+   * @param zookeeper zookeeper reference
+   * @param location The server hosting <code>-ROOT-</code>
+   * @throws KeeperException unexpected zookeeper exception
+   */
+  public static void setRootLocation(ZooKeeperWatcher zookeeper,
+      final ServerName location)
+  throws KeeperException {
+    LOG.info("Setting ROOT region location in ZooKeeper as " + location);
+    // Make the RootRegionServer pb and then get its bytes and save this as
+    // the znode content.
+    byte [] data = getRootRegionServerZNodeContent(location);
+    try {
+      ZKUtil.createAndWatch(zookeeper, zookeeper.rootServerZNode, data);
+    } catch(KeeperException.NodeExistsException nee) {
+      LOG.debug("ROOT region location already existed, updated location");
+      ZKUtil.setData(zookeeper, zookeeper.rootServerZNode, data);
+    }
+  }
+
+  /**
+   * Build up the znode content.
+   * @param sn What to put into the znode.
+   * @return The content of the root-region-server znode
+   */
+  static byte [] getRootRegionServerZNodeContent(final ServerName sn) {
+    // ZNode content is a pb message preceded by some pb magic.
+    HBaseProtos.ServerName pbsn =
+      HBaseProtos.ServerName.newBuilder().setHostName(sn.getHostname()).
+      setPort(sn.getPort()).setStartCode(sn.getStartcode()).build();
+    ZooKeeperProtos.RootRegionServer pbrsr =
+      ZooKeeperProtos.RootRegionServer.newBuilder().setServer(pbsn).build();
+    return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
+  }
+
+  /**
+   * Deletes the location of <code>-ROOT-</code> in ZooKeeper.
+   * @param zookeeper zookeeper reference
+   * @throws KeeperException unexpected zookeeper exception
+   */
+  public static void deleteRootLocation(ZooKeeperWatcher zookeeper)
+  throws KeeperException {
+    LOG.info("Unsetting ROOT region location in ZooKeeper");
+    try {
+      // Just delete the node.  Don't need any watches.
+      ZKUtil.deleteNode(zookeeper, zookeeper.rootServerZNode);
+    } catch(KeeperException.NoNodeException nne) {
+      // Has already been deleted
+    }
+  }
+
+  /**
+   * Wait until the root region is available.
+   * @param zkw
+   * @param timeout
+   * @return ServerName or null if we timed out.
+   * @throws InterruptedException
+   */
+  public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
+      final long timeout)
+  throws InterruptedException {
+    byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.rootServerZNode, timeout);
+    return dataToServerName(data);
+  }
+
+  /**
    * @param data
    * @return Returns null if <code>data</code> is null else converts passed data
    * to a ServerName instance.
    */
-  public static ServerName dataToServerName(final byte [] data) {
+  static ServerName dataToServerName(final byte [] data) {
+    if (data == null || data.length <= 0) return null;
+    if (ProtobufUtil.isPBMagicPrefix(data)) {
+      int prefixLen = ProtobufUtil.lengthOfPBMagic();
+      try {
+        RootRegionServer rss =
+          RootRegionServer.newBuilder().mergeFrom(data, prefixLen, data.length - prefixLen).build();
+        HBaseProtos.ServerName sn = rss.getServer();
+        return new ServerName(sn.getHostName(), sn.getPort(), sn.getStartCode());
+      } catch (InvalidProtocolBufferException e) {
+        // A failed parse of the znode is pretty catastrophic.  Rather than loop
+        // retrying hoping the bad bytes will change, and rather than change
+        // the signature on this method to add an IOE which will send ripples all
+        // over the code base, throw a RuntimeException.  This should "never" happen.
+        throw new RuntimeException(e);
+      }
+    }
     // The str returned could be old style -- pre hbase-1502 -- which was
     // hostname and port separated by a colon rather than hostname, port and
     // startcode delimited by a ','.
-    if (data == null || data.length <= 0) return null;
     String str = Bytes.toString(data);
     int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
     if (index != -1) {

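To see the new write and read paths end to end, here is a minimal round-trip sketch (not part of the commit). It is placed in the tracker's own package because getRootRegionServerZNodeContent and dataToServerName are package-private; the ServerName constructor matches its use in the tests below, and the class name is hypothetical.

package org.apache.hadoop.hbase.zookeeper;

import org.apache.hadoop.hbase.ServerName;

public class RootZNodeRoundTripSketch {
  public static void main(String[] args) {
    ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
    // Writer side: 'PBUF' magic + serialized RootRegionServer message.
    byte[] content = RootRegionTracker.getRootRegionServerZNodeContent(sn);
    // Reader side: detects the magic and parses the pb; it would fall back
    // to the old colon/comma string formats if the magic were absent.
    ServerName parsed = RootRegionTracker.dataToServerName(content);
    System.out.println(sn.equals(parsed)); // true
  }
}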
View File

@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// ZNode data in hbase are serialized protobufs with a four byte
// 'magic' 'PBUF' prefix.
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "ZooKeeperProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "hbase.proto";
/**
* Content of the root-region-server znode.
*/
message RootRegionServer {
// The ServerName hosting the root region currently.
required ServerName server = 2;
}
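
As a sketch of this message in use (not part of the commit; assumes the generated HBaseProtos and ZooKeeperProtos classes above are compiled), building and re-parsing a RootRegionServer with the generated API:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer;

public class RootRegionServerPbSketch {
  public static void main(String[] args) throws Exception {
    // Build the required nested ServerName, then the RootRegionServer.
    HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
      .setHostName("example.com").setPort(60020).setStartCode(1L).build();
    RootRegionServer rrs = RootRegionServer.newBuilder().setServer(sn).build();
    // Serialize; in the znode these bytes follow the 4-byte 'PBUF' magic.
    byte[] wire = rrs.toByteArray();
    RootRegionServer parsed = RootRegionServer.parseFrom(wire);
    System.out.println(parsed.getServer().getHostName()); // example.com
  }
}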

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Progressable;
@ -118,12 +119,12 @@ public class TestCatalogTracker {
     HConnection connection = Mockito.mock(HConnection.class);
     constructAndStartCatalogTracker(connection);
     try {
-      RootLocationEditor.setRootLocation(this.watcher,
+      RootRegionTracker.setRootLocation(this.watcher,
         new ServerName("example.com", 1234, System.currentTimeMillis()));
     } finally {
       // Clean out root location or later tests will be confused... they presume
       // start fresh in zk.
-      RootLocationEditor.deleteRootLocation(this.watcher);
+      RootRegionTracker.deleteRootLocation(this.watcher);
     }
   }
@ -201,7 +202,7 @@ public class TestCatalogTracker {
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
       // Set a location for root and meta.
-      RootLocationEditor.setRootLocation(this.watcher, SN);
+      RootRegionTracker.setRootLocation(this.watcher, SN);
       ct.setMetaLocation(SN);
       // Call the method that HBASE-4288 calls.  It will try and verify the
       // meta location and will fail on first attempt then go into a long wait.
@ -231,7 +232,7 @@ public class TestCatalogTracker {
         // Clean out root and meta locations or later tests will be confused...
         // they presume start fresh in zk.
         ct.resetMetaLocation();
-        RootLocationEditor.deleteRootLocation(this.watcher);
+        RootRegionTracker.deleteRootLocation(this.watcher);
       }
     } finally {
       // Clear out our doctored connection or could mess up subsequent tests.
@ -258,14 +259,14 @@ public class TestCatalogTracker {
     // Now start up the catalogtracker with our doctored Connection.
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
-      RootLocationEditor.setRootLocation(this.watcher, SN);
+      RootRegionTracker.setRootLocation(this.watcher, SN);
       long timeout = UTIL.getConfiguration().
         getLong("hbase.catalog.verification.timeout", 1000);
       Assert.assertFalse(ct.verifyMetaRegionLocation(timeout));
     } finally {
       // Clean out root location or later tests will be confused... they
       // presume start fresh in zk.
-      RootLocationEditor.deleteRootLocation(this.watcher);
+      RootRegionTracker.deleteRootLocation(this.watcher);
     }
   } finally {
     // Clear out our doctored connection or could mess up subsequent tests.
@ -294,13 +295,13 @@ public class TestCatalogTracker {
       thenReturn(implementation);
     final CatalogTracker ct = constructAndStartCatalogTracker(connection);
     try {
-      RootLocationEditor.setRootLocation(this.watcher,
+      RootRegionTracker.setRootLocation(this.watcher,
         new ServerName("example.com", 1234, System.currentTimeMillis()));
       Assert.assertFalse(ct.verifyRootRegionLocation(100));
     } finally {
       // Clean out root location or later tests will be confused... they presume
       // start fresh in zk.
-      RootLocationEditor.deleteRootLocation(this.watcher);
+      RootRegionTracker.deleteRootLocation(this.watcher);
     }
   }
@ -350,7 +351,7 @@ public class TestCatalogTracker {
   }
   private ServerName setRootLocation() throws KeeperException {
-    RootLocationEditor.setRootLocation(this.watcher, SN);
+    RootRegionTracker.setRootLocation(this.watcher, SN);
     return SN;
   }

View File

@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.catalog;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Test;
@ -61,7 +62,7 @@ public class TestCatalogTrackerOnCluster {
     });
     ServerName nonsense =
       new ServerName("example.org", 1234, System.currentTimeMillis());
-    RootLocationEditor.setRootLocation(zookeeper, nonsense);
+    RootRegionTracker.setRootLocation(zookeeper, nonsense);
     // Bring back up the hbase cluster.  See if it can deal with nonsense root
     // location.  The cluster should start and be fully available.

View File

@ -35,13 +35,13 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.RootLocationEditor;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.io.MapWritable;
@ -139,7 +139,7 @@ public class TestMasterNoCluster {
     // Put some data into the servers.  Make it look like sn0 has the root
     // w/ an entry that points to sn1 as the host of .META.  Put data into sn2
     // so it looks like it has a few regions for a table named 't'.
-    RootLocationEditor.setRootLocation(rs0.getZooKeeper(), rs0.getServerName());
+    RootRegionTracker.setRootLocation(rs0.getZooKeeper(), rs0.getServerName());
     byte [] rootregion = Bytes.toBytes("-ROOT-,,0");
     rs0.setGetResult(rootregion, HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),
       Mocking.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO,
@ -300,7 +300,7 @@ public class TestMasterNoCluster {
     // when it's figured it just opened the root region by setting the root
     // location up into zk.  Since we're mocking regionserver, need to do this
     // ourselves.
-    RootLocationEditor.setRootLocation(rs0.getZooKeeper(), rs0.getServerName());
+    RootRegionTracker.setRootLocation(rs0.getZooKeeper(), rs0.getServerName());
     // Do same transitions for .META. (presuming master has by now assigned
     // .META. to rs1).
     Mocking.fakeRegionServerRegionOpenInZK(rs0.getZooKeeper(),