HBASE-8506 Remove unused/dead classes

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1480493 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-05-08 22:51:18 +00:00
parent 4dc244ce5a
commit cefa4fa671
11 changed files with 1 addition and 760 deletions

org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java View File

@@ -1,96 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.compress.Compression;
/**
* Immutable HColumnDescriptor
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
/**
* @param desc wrapped
*/
public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) {
super(desc);
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[])
*/
@Override
public HColumnDescriptor setValue(byte[] key, byte[] value) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String)
*/
@Override
public HColumnDescriptor setValue(String key, String value) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int)
*/
@Override
public HColumnDescriptor setMaxVersions(int maxVersions) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)
*/
@Override
public HColumnDescriptor setInMemory(boolean inMemory) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean)
*/
@Override
public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int)
*/
@Override
public HColumnDescriptor setTimeToLive(int timeToLive) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
/**
* @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm)
*/
@Override
public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
throw new UnsupportedOperationException("HColumnDescriptor is read-only");
}
}
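For reference, a minimal sketch of how the removed wrapper behaved (the "cf" family name is illustrative):

HColumnDescriptor mutable = new HColumnDescriptor("cf");
HColumnDescriptor readOnly = new UnmodifyableHColumnDescriptor(mutable);
readOnly.setMaxVersions(3); // throws UnsupportedOperationException("HColumnDescriptor is read-only")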

org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java View File

@@ -1,48 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.zookeeper;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionInfo;
/**
* Tracks the unassigned zookeeper node used by the META table.
* <p>
* If META is already assigned when instantiating this class, you will not
* receive any notification for that assignment. You will receive a
* notification after META has been successfully assigned to a new location.
*/
@InterfaceAudience.Private
public class MetaNodeTracker extends ZooKeeperNodeTracker {
/**
* Creates a meta node tracker.
* @param watcher zookeeper watcher to register the tracker with
* @param abortable abortable to notify if an unrecoverable error occurs
*/
public MetaNodeTracker(final ZooKeeperWatcher watcher, final Abortable abortable) {
super(watcher, ZKUtil.joinZNode(watcher.assignmentZNode,
HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()), abortable);
}
@Override
public void nodeDeleted(String path) {
super.nodeDeleted(path);
}
}
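For context, the tracker followed the usual ZooKeeperNodeTracker lifecycle; a hedged sketch, assuming in-scope watcher/abortable handles and the inherited start()/blockUntilAvailable() contract:

MetaNodeTracker tracker = new MetaNodeTracker(watcher, abortable);
tracker.start();                             // sets the watch on the unassigned META znode
byte[] data = tracker.blockUntilAvailable(); // returns once META has been assigned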

org/apache/hadoop/hbase/io/hfile/BoundedRangeFileInputStream.java View File

@@ -1,151 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hbase.io.hfile;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
/**
* BoundedRangeFileInputStream abstracts a contiguous region of a Hadoop
* FSDataInputStream as a regular input stream. One can create multiple
* BoundedRangeFileInputStreams on top of the same FSDataInputStream and they
* would not interfere with each other.
* Copied from hadoop-335 tfile.
*/
@InterfaceAudience.Private
class BoundedRangeFileInputStream extends InputStream {
private FSDataInputStream in;
private long pos;
private long end;
private long mark;
private final byte[] oneByte = new byte[1];
private final boolean pread;
/**
* Constructor
*
* @param in
* The FSDataInputStream we connect to.
* @param offset
* Beginning offset of the region.
* @param length
* Length of the region.
* @param pread If true, use Filesystem positional read rather than seek+read.
*
* The actual length of the region may be smaller if (offset +
* length) goes beyond the end of the FS input stream.
*/
public BoundedRangeFileInputStream(FSDataInputStream in, long offset,
long length, final boolean pread) {
if (offset < 0 || length < 0) {
throw new IndexOutOfBoundsException("Invalid offset/length: " + offset
+ "/" + length);
}
this.in = in;
this.pos = offset;
this.end = offset + length;
this.mark = -1;
this.pread = pread;
}
@Override
public int available() throws IOException {
int avail = in.available();
if (pos + avail > end) {
avail = (int) (end - pos);
}
return avail;
}
@Override
public int read() throws IOException {
int ret = read(oneByte);
if (ret == 1) return oneByte[0] & 0xff;
return -1;
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
}
int n = (int) Math.min(Integer.MAX_VALUE, Math.min(len, (end - pos)));
if (n == 0) return -1;
int ret = 0;
if (this.pread) {
ret = in.read(pos, b, off, n);
} else {
synchronized (in) {
in.seek(pos);
ret = in.read(b, off, n);
}
}
if (ret < 0) {
end = pos;
return -1;
}
pos += ret;
return ret;
}
/*
 * We may skip beyond the end of the file.
 */
@Override
public long skip(long n) throws IOException {
long len = Math.min(n, end - pos);
pos += len;
return len;
}
@Override
public void mark(int readlimit) {
mark = pos;
}
@Override
public void reset() throws IOException {
if (mark < 0) throw new IOException("Resetting to invalid mark");
pos = mark;
}
@Override
public boolean markSupported() {
return true;
}
@Override
public void close() {
// Invalidate the state of the stream.
in = null;
pos = end;
mark = -1;
}
}
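A short usage sketch of the removed stream (path and offsets illustrative; FileSystem and Path are the standard Hadoop APIs):

FileSystem fs = FileSystem.get(conf);
FSDataInputStream fsdis = fs.open(new Path("/hbase/somefile"));
// Expose bytes [128, 128 + 4096) as an ordinary InputStream; pread=true uses
// positional reads, so ranges sharing the stream do not interfere.
InputStream range = new BoundedRangeFileInputStream(fsdis, 128L, 4096L, true);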

org/apache/hadoop/hbase/ipc/ResponseFlag.java View File

@@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.ipc;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Utility for managing the flag byte passed in response to a
* {@link RpcServer.Call}
*/
@InterfaceAudience.Private
class ResponseFlag {
private static final byte ERROR_BIT = 0x1;
private static final byte LENGTH_BIT = 0x2;
private ResponseFlag() {
// Make it so this class cannot be constructed.
}
static boolean isError(final byte flag) {
return (flag & ERROR_BIT) != 0;
}
static boolean isLength(final byte flag) {
return (flag & LENGTH_BIT) != 0;
}
static byte getLengthSetOnly() {
return LENGTH_BIT;
}
static byte getErrorAndLengthSet() {
return LENGTH_BIT | ERROR_BIT;
}
}
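The flag byte packed two independent bits; a quick illustration derived from the constants above:

byte flag = ResponseFlag.getErrorAndLengthSet();               // 0x3 = LENGTH_BIT | ERROR_BIT
assert ResponseFlag.isError(flag) && ResponseFlag.isLength(flag);
assert !ResponseFlag.isError(ResponseFlag.getLengthSetOnly()); // 0x2 sets the length bit only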

org/apache/hadoop/hbase/ipc/RpcServer.java View File

@@ -1980,9 +1980,8 @@ public class RpcServer implements RpcServerInterface {
*
* @param response buffer to serialize the response into
* @param call {@link Call} to which we are setting up the response
* @param status {@link Status} of the IPC call
* @param errorClass error class, if the call failed
* @param error error message, if the call failed
* @param t the exception that caused the call to fail, if any
* @throws IOException
*/
private void setupResponse(ByteArrayOutputStream response, Call call, Throwable t, String error)

org/apache/hadoop/hbase/ipc/Status.java View File

@@ -1,35 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.ipc;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Status of a Hadoop IPC call.
*/
@InterfaceAudience.Private
enum Status {
SUCCESS (0),
ERROR (1),
FATAL (-1);
int state;
private Status(int state) {
this.state = state;
}
}
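The explicit state field let FATAL sit at -1 instead of relying on enum ordinals; for example:

int ok = Status.SUCCESS.state; // 0
int bad = Status.FATAL.state;  // -1, impossible as an ordinal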

org/apache/hadoop/hbase/metrics/HBaseInfo.java View File

@@ -1,102 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.metrics.MetricsMBeanBase;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import javax.management.ObjectName;
/**
* Exports HBase system information as an MBean for JMX observation.
*/
@Deprecated
@InterfaceAudience.Private
public class HBaseInfo {
protected static class HBaseInfoMBean extends MetricsMBeanBase {
private final ObjectName mbeanName;
public HBaseInfoMBean(MetricsRegistry registry, String rsName) {
super(registry, "HBase cluster information");
// The name seems wrong to me; it should include the cluster id IMO.
// But that would make it harder to locate, and it is rare to have
// two clusters up on a single machine. St.Ack 20120309
mbeanName = MBeanUtil.registerMBean("HBase", "Info", this);
}
public void shutdown() {
if (mbeanName != null)
MBeanUtil.unregisterMBean(mbeanName);
}
}
protected final MetricsRecord mr;
protected final HBaseInfoMBean mbean;
protected MetricsRegistry registry = new MetricsRegistry();
private static HBaseInfo theInstance = null;
public synchronized static HBaseInfo init() {
if (theInstance == null) {
theInstance = new HBaseInfo();
}
return theInstance;
}
// Instance initializer: registers build/version info for the HBase and HDFS jars.
{
// HBase jar info
new MetricsString("date", registry,
org.apache.hadoop.hbase.util.VersionInfo.getDate());
new MetricsString("revision", registry,
org.apache.hadoop.hbase.util.VersionInfo.getRevision());
new MetricsString("url", registry, org.apache.hadoop.hbase.util.VersionInfo
.getUrl());
new MetricsString("user", registry,
org.apache.hadoop.hbase.util.VersionInfo.getUser());
new MetricsString("version", registry,
org.apache.hadoop.hbase.util.VersionInfo.getVersion());
// Info on the HDFS jar that HBase has (aka: HDFS Client)
new MetricsString("hdfsDate", registry, org.apache.hadoop.util.VersionInfo
.getDate());
new MetricsString("hdfsRevision", registry,
org.apache.hadoop.util.VersionInfo.getRevision());
new MetricsString("hdfsUrl", registry, org.apache.hadoop.util.VersionInfo
.getUrl());
new MetricsString("hdfsUser", registry, org.apache.hadoop.util.VersionInfo
.getUser());
new MetricsString("hdfsVersion", registry,
org.apache.hadoop.util.VersionInfo.getVersion());
}
protected HBaseInfo() {
MetricsContext context = MetricsUtil.getContext("hbase");
mr = MetricsUtil.createRecord(context, "info");
String name = Thread.currentThread().getName();
mr.setTag("Info", name);
// export for JMX
mbean = new HBaseInfoMBean(this.registry, name);
}
}
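init() acted as an idempotent singleton accessor, so repeated calls registered the MBean only once:

HBaseInfo info = HBaseInfo.init(); // first call registers the "HBase"/"Info" MBean
HBaseInfo same = HBaseInfo.init(); // later calls return the same instance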

org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java View File

@@ -1,113 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.metrics.file;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.file.FileContext;
import org.apache.hadoop.metrics.spi.OutputRecord;
/**
* Add timestamp to {@link org.apache.hadoop.metrics.file.FileContext#emitRecord(String, String, OutputRecord)}.
*/
@Deprecated
@InterfaceAudience.Private
public class TimeStampingFileContext extends FileContext {
// Copies a chunk of FileContext here because writer and file are private
// in the superclass.
private File file = null;
private PrintWriter writer = null;
private final SimpleDateFormat sdf;
public TimeStampingFileContext() {
super();
this.sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
}
@Override
public void init(String contextName, ContextFactory factory) {
super.init(contextName, factory);
String fileName = getAttribute(FILE_NAME_PROPERTY);
if (fileName != null) {
file = new File(fileName);
}
}
@Override
public void startMonitoring() throws IOException {
if (file == null) {
writer = new PrintWriter(new BufferedOutputStream(System.out));
} else {
writer = new PrintWriter(new FileWriter(file, true));
}
super.startMonitoring();
}
@Override
public void stopMonitoring() {
super.stopMonitoring();
if (writer != null) {
writer.close();
writer = null;
}
}
private synchronized String iso8601() {
return this.sdf.format(new Date());
}
@Override
public void emitRecord(String contextName, String recordName,
OutputRecord outRec) {
writer.print(iso8601());
writer.print(" ");
writer.print(contextName);
writer.print(".");
writer.print(recordName);
String separator = ": ";
for (String tagName : outRec.getTagNames()) {
writer.print(separator);
separator = ", ";
writer.print(tagName);
writer.print("=");
writer.print(outRec.getTag(tagName));
}
for (String metricName : outRec.getMetricNames()) {
writer.print(separator);
separator = ", ";
writer.print(metricName);
writer.print("=");
writer.print(outRec.getMetric(metricName));
}
writer.println();
}
@Override
public void flush() {
writer.flush();
}
}
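The context was wired in through the Hadoop metrics v1 properties file; a hedged example, where the "hbase" context prefix, file path, and period are illustrative:

# hadoop-metrics.properties
hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
hbase.fileName=/tmp/hbase-metrics.log
hbase.period=10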

org/apache/hadoop/hbase/regionserver/DebugPrint.java View File

@@ -1,71 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.FileWriter;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class DebugPrint {
private static final AtomicBoolean enabled = new AtomicBoolean(false);
private static final Object sync = new Object();
public static StringBuilder out = new StringBuilder();
public static void enable() {
enabled.set(true);
}
public static void disable() {
enabled.set(false);
}
public static void reset() {
synchronized (sync) {
enable(); // a reset implies the caller wants printing enabled
out = new StringBuilder();
}
}
public static void dumpToFile(String file) throws IOException {
FileWriter f = new FileWriter(file);
synchronized (sync) {
f.write(out.toString());
}
f.close();
}
public static void println(String m) {
if (!enabled.get()) {
System.out.println(m);
return;
}
synchronized (sync) {
String threadName = Thread.currentThread().getName();
out.append("<");
out.append(threadName);
out.append("> ");
out.append(m);
out.append("\n");
}
}
}
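Typical use, following the methods above (file path illustrative):

DebugPrint.enable();
DebugPrint.println("flushing region");   // buffered as "<thread-name> flushing region"
DebugPrint.dumpToFile("/tmp/debug.out"); // writes the buffer; does not clear it
DebugPrint.reset();                      // re-enables and starts a fresh buffer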

org/apache/hadoop/hbase/regionserver/handler/CloseRootHandler.java View File

@@ -1,46 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
/**
* Handles closing of the root region on a region server.
*/
@InterfaceAudience.Private
public class CloseRootHandler extends CloseRegionHandler {
// This is executed after receiving a CLOSE RPC from the master for root.
public CloseRootHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo) {
this(server, rsServices, regionInfo, false, true, -1);
}
// This is called directly by the regionserver when it has determined it
// is shutting down.
public CloseRootHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
final boolean abort, final boolean zk, final int versionOfClosingNode) {
super(server, rsServices, regionInfo, abort, zk, versionOfClosingNode,
EventType.M_RS_CLOSE_ROOT);
}
}
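A hedged sketch of the master-initiated path, with server, rsServices, and rootRegionInfo assumed to be in-scope handles on the hosting region server (process() is the inherited EventHandler entry point):

new CloseRootHandler(server, rsServices, rootRegionInfo).process();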

org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java View File

@@ -1,46 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
/**
* Handles opening of the root region on a region server.
* <p>
* This is executed after receiving an OPEN RPC from the master for root.
*/
@InterfaceAudience.Private
public class OpenRootHandler extends OpenRegionHandler {
public OpenRootHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
final HTableDescriptor htd) {
super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_ROOT, -1);
}
public OpenRootHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
final HTableDescriptor htd, int versionOfOfflineNode) {
super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_ROOT,
versionOfOfflineNode);
}
}
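And the matching sketch for opening, again with server, rsServices, regionInfo, and htd assumed in scope; the versionOfOfflineNode overload applies when the expected version of the ZK unassigned node must be checked:

new OpenRootHandler(server, rsServices, regionInfo, htd).process(); // OPEN RPC from master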