HBASE-14713 Remove simple deprecated-since-1.0 code in hbase-server from hbase 2.0
This commit is contained in:
parent eb6b2a1a04
commit 48be35cb7e
@@ -66,19 +66,6 @@ public class CoprocessorHConnection extends ConnectionImplementation {
  private final ServerName serverName;
  private final HRegionServer server;

-  /**
-   * Legacy constructor
-   * @param delegate
-   * @param server
-   * @throws IOException if we cannot create the connection
-   * @deprecated delegate is not used
-   */
-  @Deprecated
-  public CoprocessorHConnection(ClusterConnection delegate, HRegionServer server)
-      throws IOException {
-    this(server);
-  }
-
  /**
   * Constructor that uses server configuration
   * @param server
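Migration sketch (not part of the patch): code that used the removed two-argument constructor can simply drop the unused delegate and keep the server-configured constructor that survives in the hunk above. The helper class and method names below are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.client.CoprocessorHConnection;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

public class CoprocessorConnectionExample {
  // The delegate argument was documented as unused; only the region server is needed.
  static CoprocessorHConnection open(HRegionServer server) throws IOException {
    return new CoprocessorHConnection(server);
  }
}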
@@ -128,7 +128,7 @@ public final class HTableWrapper implements Table {
  }

  /**
-   * @deprecated Use {@link #existsAll(java.util.List)} instead.
+   * @deprecated Use {@link #existsAll(java.util.List)} instead. since 2.0. remove in 3.0
   */
  @Deprecated
  public Boolean[] exists(List<Get> gets) throws IOException {
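The deprecation tag above points coprocessor code at existsAll(List<Get>). A minimal migration sketch for a caller (the wrapper class and method name here are illustrative, not from the patch):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

public class ExistsAllMigration {
  // Returns one flag per Get, in the same order, using the non-deprecated Table call.
  static boolean[] checkRows(Table table, List<Get> gets) throws IOException {
    return table.existsAll(gets);  // replaces the Boolean[] exists(List<Get>) wrapper
  }
}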
@@ -78,7 +78,7 @@ public class TableInputFormat extends TableInputFormatBase implements
  }

  // connected to table?
-  if (getHTable() == null) {
+  if (getTable() == null) {
    throw new IOException("could not connect to table '" +
      tableNames[0].getName() + "'");
  }
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

@@ -249,15 +248,6 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    this.inputColumns = inputColumns;
  }

-  /**
-   * Allows subclasses to get the {@link HTable}.
-   * @deprecated use {@link #getTable()}
-   */
-  @Deprecated
-  protected HTable getHTable() {
-    return (HTable) getTable();
-  }
-
  /**
   * Allows subclasses to get the {@link Table}.
   */

@@ -268,17 +258,6 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    return this.table;
  }

-  /**
-   * Allows subclasses to set the {@link HTable}.
-   *
-   * @param table to get the data from
-   * @deprecated use {@link #initializeTable(Connection,TableName)}
-   */
-  @Deprecated
-  protected void setHTable(HTable table) {
-    this.table = table;
-  }
-
  /**
   * Allows subclasses to set the {@link TableRecordReader}.
   *
@@ -1,202 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.mapreduce;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.Table;
|
||||
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
|
||||
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.regionserver.BloomType;
|
||||
import org.apache.hadoop.mapreduce.Job;
|
||||
import org.apache.hadoop.mapreduce.RecordWriter;
|
||||
import org.apache.hadoop.mapreduce.TaskAttemptContext;
|
||||
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
/**
|
||||
* Writes HFiles. Passed KeyValues must arrive in order.
|
||||
* Writes current time as the sequence id for the file. Sets the major compacted
|
||||
* attribute on created hfiles. Calling write(null,null) will forcibly roll
|
||||
* all HFiles being written.
|
||||
* <p>
|
||||
* Using this class as part of a MapReduce job is best done
|
||||
* using {@link #configureIncrementalLoad(Job, HTable)}.
|
||||
* @see KeyValueSortReducer
|
||||
* @deprecated use {@link HFileOutputFormat2} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Stable
|
||||
public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> {
|
||||
private static final Log LOG = LogFactory.getLog(HFileOutputFormat.class);
|
||||
|
||||
// This constant is public since the client can modify this when setting
|
||||
// up their conf object and thus refer to this symbol.
|
||||
// It is present for backwards compatibility reasons. Use it only to
|
||||
// override the auto-detection of datablock encoding.
|
||||
public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
|
||||
HFileOutputFormat2.DATABLOCK_ENCODING_OVERRIDE_CONF_KEY;
|
||||
|
||||
@Override
|
||||
public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(
|
||||
final TaskAttemptContext context) throws IOException, InterruptedException {
|
||||
return HFileOutputFormat2.createRecordWriter(context);
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure a MapReduce Job to perform an incremental load into the given
|
||||
* table. This
|
||||
* <ul>
|
||||
* <li>Inspects the table to configure a total order partitioner</li>
|
||||
* <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
|
||||
* <li>Sets the number of reduce tasks to match the current number of regions</li>
|
||||
* <li>Sets the output key/value class to match HFileOutputFormat's requirements</li>
|
||||
* <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
|
||||
* PutSortReducer)</li>
|
||||
* </ul>
|
||||
* The user should be sure to set the map output value class to either KeyValue or Put before
|
||||
* running this function.
|
||||
*/
|
||||
public static void configureIncrementalLoad(Job job, HTable table)
|
||||
throws IOException {
|
||||
HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
|
||||
table.getRegionLocator());
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs inside the task to deserialize column family to compression algorithm
|
||||
* map from the configuration.
|
||||
*
|
||||
* @param conf to read the serialized values from
|
||||
* @return a map from column family to the configured compression algorithm
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration
|
||||
conf) {
|
||||
return HFileOutputFormat2.createFamilyCompressionMap(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs inside the task to deserialize column family to bloom filter type
|
||||
* map from the configuration.
|
||||
*
|
||||
* @param conf to read the serialized values from
|
||||
* @return a map from column family to the the configured bloom filter type
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
|
||||
return HFileOutputFormat2.createFamilyBloomTypeMap(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs inside the task to deserialize column family to block size
|
||||
* map from the configuration.
|
||||
*
|
||||
* @param conf to read the serialized values from
|
||||
* @return a map from column family to the configured block size
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
|
||||
return HFileOutputFormat2.createFamilyBlockSizeMap(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs inside the task to deserialize column family to data block encoding
|
||||
* type map from the configuration.
|
||||
*
|
||||
* @param conf to read the serialized values from
|
||||
* @return a map from column family to HFileDataBlockEncoder for the
|
||||
* configured data block type for the family
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Map<byte[], DataBlockEncoding> createFamilyDataBlockEncodingMap(
|
||||
Configuration conf) {
|
||||
return HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
|
||||
* <code>splitPoints</code>. Cleans up the partitions file after job exists.
|
||||
*/
|
||||
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
|
||||
throws IOException {
|
||||
HFileOutputFormat2.configurePartitioner(job, splitPoints);
|
||||
}
|
||||
|
||||
static void configureCompression(Table table, Configuration conf) throws IOException {
|
||||
HFileOutputFormat2.configureCompression(conf, table.getTableDescriptor());
|
||||
}
|
||||
|
||||
/**
|
||||
* Serialize column family to block size map to configuration.
|
||||
* Invoked while configuring the MR job for incremental load.
|
||||
*
|
||||
* @param table to read the properties from
|
||||
* @param conf to persist serialized values into
|
||||
* @throws IOException
|
||||
* on failure to read column family descriptors
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static void configureBlockSize(Table table, Configuration conf) throws IOException {
|
||||
HFileOutputFormat2.configureBlockSize(table.getTableDescriptor(), conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Serialize column family to bloom type map to configuration.
|
||||
* Invoked while configuring the MR job for incremental load.
|
||||
*
|
||||
* @param table to read the properties from
|
||||
* @param conf to persist serialized values into
|
||||
* @throws IOException
|
||||
* on failure to read column family descriptors
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static void configureBloomType(Table table, Configuration conf) throws IOException {
|
||||
HFileOutputFormat2.configureBloomType(table.getTableDescriptor(), conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Serialize column family to data block encoding map to configuration.
|
||||
* Invoked while configuring the MR job for incremental load.
|
||||
*
|
||||
* @param table to read the properties from
|
||||
* @param conf to persist serialized values into
|
||||
* @throws IOException
|
||||
* on failure to read column family descriptors
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static void configureDataBlockEncoding(Table table,
|
||||
Configuration conf) throws IOException {
|
||||
HTableDescriptor tableDescriptor = table.getTableDescriptor();
|
||||
HFileOutputFormat2.configureDataBlockEncoding(tableDescriptor, conf);
|
||||
}
|
||||
}
|
|
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.client.RegionLocator;

@@ -411,28 +410,6 @@ public class HFileOutputFormat2
    }
  }

-  /**
-   * Configure a MapReduce Job to perform an incremental load into the given
-   * table. This
-   * <ul>
-   *   <li>Inspects the table to configure a total order partitioner</li>
-   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
-   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
-   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
-   *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
-   *   PutSortReducer)</li>
-   * </ul>
-   * The user should be sure to set the map output value class to either KeyValue or Put before
-   * running this function.
-   *
-   * @deprecated Use {@link #configureIncrementalLoad(Job, Table, RegionLocator)} instead.
-   */
-  @Deprecated
-  public static void configureIncrementalLoad(Job job, HTable table)
-      throws IOException {
-    configureIncrementalLoad(job, table.getTableDescriptor(), table.getRegionLocator());
-  }
-
  /**
   * Configure a MapReduce Job to perform an incremental load into the given
   * table. This
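With the HTable overload gone, bulk-load jobs configure the same incremental load through the (Job, Table, RegionLocator) overload kept in HFileOutputFormat2. A minimal sketch of a job driver; the job name and table name "my_table" are placeholders, not from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class BulkLoadJobSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "bulk-load-prepare");          // placeholder job name
    TableName name = TableName.valueOf("my_table");                // placeholder table name
    // Obtain Table and RegionLocator from a Connection instead of constructing an HTable.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         RegionLocator locator = conn.getRegionLocator(name)) {
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
    }
  }
}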
@@ -34,7 +34,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
 * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL}
 * files.
- * @deprecated use {@link WALInputFormat}
+ * @deprecated use {@link WALInputFormat}. Remove in hadoop 3.0
 */
@Deprecated
@InterfaceAudience.Public
@@ -32,7 +32,7 @@ import org.apache.hadoop.mapreduce.Reducer;
 * Reads in all KeyValues from passed Iterator, sorts them, then emits
 * KeyValues in sorted order. If lots of columns per row, it will use lots of
 * memory sorting.
- * @see HFileOutputFormat
+ * @see HFileOutputFormat2
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.StringUtils;
 * Reads in all Puts from passed Iterator, sorts them, then emits
 * Puts in sorted order. If lots of columns per row, it will use lots of
 * memory sorting.
- * @see HFileOutputFormat
+ * @see HFileOutputFormat2
 * @see KeyValueSortReducer
 */
@InterfaceAudience.Public
@@ -344,11 +344,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
    }
  }

-  /**
-   * @deprecated mistakenly made public in 0.98.7. scope will change to package-private
-   */
-  @Deprecated
-  public String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException {
+  String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException {
    String hostName = this.reverseDNSCacheMap.get(ipAddress);
    if (hostName == null) {
      String ipAddressString = null;
@@ -557,16 +553,6 @@ extends InputFormat<ImmutableBytesWritable, Result> {
    return true;
  }

-  /**
-   * Allows subclasses to get the {@link Table}.
-   *
-   * @deprecated use {@link #getTable()}
-   */
-  @Deprecated
-  protected HTable getHTable() {
-    return (HTable) this.getTable();
-  }
-
  /**
   * Allows subclasses to get the {@link RegionLocator}.
   */
@@ -597,24 +583,6 @@ extends InputFormat<ImmutableBytesWritable, Result> {
    return admin;
  }

-  /**
-   * Allows subclasses to set the {@link HTable}.
-   *
-   * Will attempt to reuse the underlying Connection for our own needs, including
-   * retreiving an Admin interface to the HBase cluster.
-   *
-   * @param table The table to get the data from.
-   * @throws IOException
-   * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
-   */
-  @Deprecated
-  protected void setHTable(HTable table) throws IOException {
-    this.table = table;
-    this.connection = table.getConnection();
-    this.regionLocator = table.getRegionLocator();
-    this.admin = this.connection.getAdmin();
-  }
-
  /**
   * Allows subclasses to initialize the table information.
   *
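Subclasses that used to call setHTable(new HTable(...)) can switch to the initializeTable(Connection, TableName) hook that the deprecation note points at. A hedged sketch, assuming the subclass wires things up in the initialize(JobContext) override (the subclass name and "my_table" are placeholders):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.mapreduce.JobContext;

public class MyTableInputFormat extends TableInputFormatBase {
  @Override
  protected void initialize(JobContext context) throws IOException {
    // Build the Connection ourselves and hand the table coordinates to the base class;
    // this replaces the removed setHTable(HTable) path.
    Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
    initializeTable(connection, TableName.valueOf("my_table"));  // placeholder table name
  }
}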
@@ -51,18 +51,6 @@ extends RecordReader<ImmutableBytesWritable, Result> {
    this.recordReaderImpl.restart(firstRow);
  }

-
-  /**
-   * Sets the HBase table.
-   *
-   * @param htable The table to scan.
-   * @deprecated Use setTable() instead.
-   */
-  @Deprecated
-  public void setHTable(Table htable) {
-    this.setTable(htable);
-  }
-
  /**
   * @param table the {@link Table} to scan.
   */
@@ -43,7 +43,7 @@ import org.apache.hadoop.mapreduce.InputSplit;
@InterfaceStability.Evolving
public class TableSplit extends InputSplit
implements Writable, Comparable<TableSplit> {
-  /** @deprecated LOG variable would be made private. */
+  /** @deprecated LOG variable would be made private. fix in hbase 3.0 */
  @Deprecated
  public static final Log LOG = LogFactory.getLog(TableSplit.class);

|
@@ -44,7 +44,7 @@ import org.apache.hadoop.util.StringUtils;

/**
 * Emits Sorted KeyValues. Parse the passed text and creates KeyValues. Sorts them before emit.
- * @see HFileOutputFormat
+ * @see HFileOutputFormat2
 * @see KeyValueSortReducer
 * @see PutSortReducer
 */
@@ -1432,7 +1432,7 @@ public class RegionCoprocessorHost

  /**
   * @return true if default behavior should be bypassed, false otherwise
-   * @deprecated use {@link #preWALRestore(HRegionInfo, WALKey, WALEdit)}
+   * @deprecated use {@link #preWALRestore(HRegionInfo, WALKey, WALEdit)}; as of 2.0, remove in 3.0
   */
  @Deprecated
  public boolean preWALRestore(final HRegionInfo info, final HLogKey logKey,

@@ -1469,7 +1469,7 @@ public class RegionCoprocessorHost
  }

  /**
-   * @deprecated use {@link #postWALRestore(HRegionInfo, WALKey, WALEdit)}
+   * @deprecated use {@link #postWALRestore(HRegionInfo, WALKey, WALEdit)}; as of 2.0, remove in 3.0
   */
  @Deprecated
  public void postWALRestore(final HRegionInfo info, final HLogKey logKey, final WALEdit logEdit)
@@ -133,19 +133,10 @@ public abstract class RegionSplitPolicy extends Configured {
   * the split reference even when the split row not lies in the range. This method can be used
   * to decide, whether to skip the the StoreFile range check or not.
   * @return whether to skip the StoreFile range check or not
   * @deprecated Use {@link #skipStoreFileRangeCheck(String)}} instead
   */
  @Deprecated
  protected boolean skipStoreFileRangeCheck() {
    return false;
  }

  /**
   * See {@link #skipStoreFileRangeCheck()} javadoc.
   * @param familyName
   * @return whether to skip the StoreFile range check or not
   */
  protected boolean skipStoreFileRangeCheck(String familyName) {
-    return skipStoreFileRangeCheck();
+    return false;
  }
}
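Custom split policies now override only the per-family variant. A minimal sketch of such an override; the policy class name and the "meta" family rule are hypothetical, not from the patch:

import org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy;

// Hypothetical policy: skip the StoreFile range check for one specific family only,
// using the skipStoreFileRangeCheck(String) hook that replaces the removed no-arg method.
public class LenientSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {
  @Override
  protected boolean skipStoreFileRangeCheck(String familyName) {
    return "meta".equals(familyName);
  }
}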
@@ -192,7 +192,7 @@ public interface SplitTransaction {
   * @return Regions created
   * @throws IOException
   * @see #rollback(Server, RegionServerServices)
-   * @deprecated use #execute(Server, RegionServerServices, User)
+   * @deprecated use #execute(Server, RegionServerServices, User); as of 1.0.2, remove in 3.0
   */
  @Deprecated
  PairOfSameType<Region> execute(Server server, RegionServerServices services) throws IOException;

@@ -218,7 +218,7 @@ public interface SplitTransaction {
   * @throws IOException If thrown, rollback failed. Take drastic action.
   * @return True if we successfully rolled back, false if we got to the point
   * of no return and so now need to abort the server to minimize damage.
-   * @deprecated use #rollback(Server, RegionServerServices, User)
+   * @deprecated use #rollback(Server, RegionServerServices, User); as of 1.0.2, remove in 3.0
   */
  @Deprecated
  boolean rollback(Server server, RegionServerServices services) throws IOException;
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
 *
 * <p>Some Transactional edits (START, COMMIT, ABORT) will not have an
 * associated row.
- * @deprecated use WALKey
+ * @deprecated use WALKey. as of 2.0. Remove in 3.0
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
@Deprecated
@@ -1,77 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.security.access;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
|
||||
/**
|
||||
* A Static Interface.
|
||||
* Instead of having this code in the the HbaseMapWritable code, where it
|
||||
* blocks the possibility of altering the variables and changing their types,
|
||||
* it is put here in this static interface where the static final Maps are
|
||||
* loaded one time. Only byte[] and Cell are supported at this time.
|
||||
* @deprecated In place until we come up on 0.96 and then it can be removed
|
||||
* along with {@link HbaseObjectWritableFor96Migration}; needed to read
|
||||
* pre-0.96 TablePermissions.
|
||||
*/
|
||||
@Deprecated
|
||||
@InterfaceAudience.Private
|
||||
interface CodeToClassAndBackFor96Migration {
|
||||
/**
|
||||
* Static map that contains mapping from code to class
|
||||
*/
|
||||
Map<Byte, Class<?>> CODE_TO_CLASS =
|
||||
new HashMap<Byte, Class<?>>();
|
||||
|
||||
/**
|
||||
* Static map that contains mapping from class to code
|
||||
*/
|
||||
Map<Class<?>, Byte> CLASS_TO_CODE =
|
||||
new HashMap<Class<?>, Byte>();
|
||||
|
||||
/**
|
||||
* Class list for supported classes
|
||||
*/
|
||||
Class<?>[] classList = {byte[].class};
|
||||
|
||||
/**
|
||||
* The static loader that is used instead of the static constructor in
|
||||
* HbaseMapWritable.
|
||||
*/
|
||||
InternalStaticLoader sl =
|
||||
new InternalStaticLoader(classList, CODE_TO_CLASS, CLASS_TO_CODE);
|
||||
|
||||
/**
|
||||
* Class that loads the static maps with their values.
|
||||
*/
|
||||
class InternalStaticLoader{
|
||||
InternalStaticLoader(Class<?>[] classList,
|
||||
Map<Byte,Class<?>> CODE_TO_CLASS, Map<Class<?>, Byte> CLASS_TO_CODE){
|
||||
byte code = 1;
|
||||
for(int i=0; i<classList.length; i++){
|
||||
CLASS_TO_CODE.put(classList[i], code);
|
||||
CODE_TO_CLASS.put(code, classList[i]);
|
||||
code++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -56,20 +56,6 @@ public class TokenUtil {
  // This class is referenced indirectly by User out in common; instances are created by reflection
  private static final Log LOG = LogFactory.getLog(TokenUtil.class);

-  /**
-   * Obtain and return an authentication token for the current user.
-   * @param conf the configuration for connecting to the cluster
-   * @return the authentication token instance
-   * @deprecated Replaced by {@link #obtainToken(Connection)}
-   */
-  @Deprecated
-  public static Token<AuthenticationTokenIdentifier> obtainToken(
-      Configuration conf) throws IOException {
-    try (Connection connection = ConnectionFactory.createConnection(conf)) {
-      return obtainToken(connection);
-    }
-  }
-
  /**
   * Obtain and return an authentication token for the current user.
   * @param conn The HBase cluster connection
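Callers that passed a Configuration now create the Connection themselves and call obtainToken(Connection), which is exactly what the removed wrapper did internally. A minimal sketch (the wrapper class and method names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.hbase.security.token.TokenUtil;
import org.apache.hadoop.security.token.Token;

public class ObtainTokenExample {
  public static Token<AuthenticationTokenIdentifier> fetchToken() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The caller now owns the Connection; this mirrors the body of the removed overload.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      return TokenUtil.obtainToken(connection);
    }
  }
}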
@@ -120,28 +106,6 @@ public class TokenUtil {
        ? token.getService() : new Text("default");
  }

-  /**
-   * Obtain an authentication token for the given user and add it to the
-   * user's credentials.
-   * @param conf The configuration for connecting to the cluster
-   * @param user The user for whom to obtain the token
-   * @throws IOException If making a remote call to the authentication service fails
-   * @throws InterruptedException If executing as the given user is interrupted
-   * @deprecated Replaced by {@link #obtainAndCacheToken(Connection,User)}
-   */
-  @Deprecated
-  public static void obtainAndCacheToken(final Configuration conf,
-      UserGroupInformation user)
-      throws IOException, InterruptedException {
-    Connection conn = ConnectionFactory.createConnection(conf);
-    try {
-      UserProvider userProvider = UserProvider.instantiate(conf);
-      obtainAndCacheToken(conn, userProvider.create(user));
-    } finally {
-      conn.close();
-    }
-  }
-
  /**
   * Obtain an authentication token for the given user and add it to the
   * user's credentials.
@@ -176,29 +140,6 @@ public class TokenUtil {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtain an authentication token on behalf of the given user and add it to
|
||||
* the credentials for the given map reduce job.
|
||||
* @param conf The configuration for connecting to the cluster
|
||||
* @param user The user for whom to obtain the token
|
||||
* @param job The job instance in which the token should be stored
|
||||
* @throws IOException If making a remote call to the authentication service fails
|
||||
* @throws InterruptedException If executing as the given user is interrupted
|
||||
* @deprecated Replaced by {@link #obtainTokenForJob(Connection,User,Job)}
|
||||
*/
|
||||
@Deprecated
|
||||
public static void obtainTokenForJob(final Configuration conf,
|
||||
UserGroupInformation user, Job job)
|
||||
throws IOException, InterruptedException {
|
||||
Connection conn = ConnectionFactory.createConnection(conf);
|
||||
try {
|
||||
UserProvider userProvider = UserProvider.instantiate(conf);
|
||||
obtainTokenForJob(conn, userProvider.create(user), job);
|
||||
} finally {
|
||||
conn.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtain an authentication token on behalf of the given user and add it to
|
||||
* the credentials for the given map reduce job.
|
||||
|
@@ -235,28 +176,6 @@ public class TokenUtil {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtain an authentication token on behalf of the given user and add it to
|
||||
* the credentials for the given map reduce job.
|
||||
* @param user The user for whom to obtain the token
|
||||
* @param job The job configuration in which the token should be stored
|
||||
* @throws IOException If making a remote call to the authentication service fails
|
||||
* @throws InterruptedException If executing as the given user is interrupted
|
||||
* @deprecated Replaced by {@link #obtainTokenForJob(Connection,JobConf,User)}
|
||||
*/
|
||||
@Deprecated
|
||||
public static void obtainTokenForJob(final JobConf job,
|
||||
UserGroupInformation user)
|
||||
throws IOException, InterruptedException {
|
||||
Connection conn = ConnectionFactory.createConnection(job);
|
||||
try {
|
||||
UserProvider userProvider = UserProvider.instantiate(job);
|
||||
obtainTokenForJob(conn, job, userProvider.create(user));
|
||||
} finally {
|
||||
conn.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Obtain an authentication token on behalf of the given user and add it to
|
||||
* the credentials for the given map reduce job.
|
||||
|
|
|
@@ -1,421 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.util;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
import org.apache.commons.cli.CommandLine;
|
||||
import org.apache.commons.cli.CommandLineParser;
|
||||
import org.apache.commons.cli.GnuParser;
|
||||
import org.apache.commons.cli.HelpFormatter;
|
||||
import org.apache.commons.cli.Option;
|
||||
import org.apache.commons.cli.Options;
|
||||
import org.apache.commons.cli.ParseException;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.NamespaceDescriptor;
|
||||
import org.apache.hadoop.hbase.io.FileLink;
|
||||
import org.apache.hadoop.hbase.io.HFileLink;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
|
||||
/**
|
||||
* Tool to detect presence of any HFileV1 in the given directory. It prints all such regions which
|
||||
* have such files.
|
||||
* <p>
|
||||
* To print the help section of the tool:
|
||||
* </p>
|
||||
* <ul>
|
||||
* <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,</li>
|
||||
* <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h</li>
|
||||
* </ul>
|
||||
* <p>
|
||||
* It also supports -h, --help, -help options.
|
||||
* </p>
|
||||
*/
|
||||
public class HFileV1Detector extends Configured implements Tool {
|
||||
private FileSystem fs;
|
||||
private static final Log LOG = LogFactory.getLog(HFileV1Detector.class);
|
||||
private static final int DEFAULT_NUM_OF_THREADS = 10;
|
||||
/**
|
||||
* Pre-namespace archive directory
|
||||
*/
|
||||
private static final String PRE_NS_DOT_ARCHIVE = ".archive";
|
||||
/**
|
||||
* Pre-namespace tmp directory
|
||||
*/
|
||||
private static final String PRE_NS_DOT_TMP = ".tmp";
|
||||
private int numOfThreads;
|
||||
/**
|
||||
* directory to start the processing.
|
||||
*/
|
||||
private Path targetDirPath;
|
||||
/**
|
||||
* executor for processing regions.
|
||||
*/
|
||||
private ExecutorService exec;
|
||||
|
||||
/**
|
||||
* Keeps record of processed tables.
|
||||
*/
|
||||
private final Set<Path> processedTables = new HashSet<Path>();
|
||||
/**
|
||||
* set of corrupted HFiles (with undetermined major version)
|
||||
*/
|
||||
private final Set<Path> corruptedHFiles = Collections
|
||||
.newSetFromMap(new ConcurrentHashMap<Path, Boolean>());
|
||||
/**
|
||||
* set of HfileV1;
|
||||
*/
|
||||
private final Set<Path> hFileV1Set = Collections
|
||||
.newSetFromMap(new ConcurrentHashMap<Path, Boolean>());
|
||||
|
||||
private Options options = new Options();
|
||||
/**
|
||||
* used for computing pre-namespace paths for hfilelinks
|
||||
*/
|
||||
private Path defaultNamespace;
|
||||
|
||||
public HFileV1Detector() {
|
||||
Option pathOption = new Option("p", "path", true, "Path to a table, or hbase installation");
|
||||
pathOption.setRequired(false);
|
||||
options.addOption(pathOption);
|
||||
Option threadOption = new Option("n", "numberOfThreads", true,
|
||||
"Number of threads to use while processing HFiles.");
|
||||
threadOption.setRequired(false);
|
||||
options.addOption(threadOption);
|
||||
options.addOption("h", "help", false, "Help");
|
||||
}
|
||||
|
||||
private boolean parseOption(String[] args) throws ParseException, IOException {
|
||||
if (args.length == 0) {
|
||||
return true; // no args will process with default values.
|
||||
}
|
||||
CommandLineParser parser = new GnuParser();
|
||||
CommandLine cmd = parser.parse(options, args);
|
||||
if (cmd.hasOption("h")) {
|
||||
HelpFormatter formatter = new HelpFormatter();
|
||||
formatter.printHelp("HFileV1Detector", options, true);
|
||||
System.out
|
||||
.println("In case no option is provided, it processes hbase.rootdir using 10 threads.");
|
||||
System.out.println("Example:");
|
||||
System.out.println(" To detect any HFileV1 in a given hbase installation '/myhbase':");
|
||||
System.out.println(" $ $HBASE_HOME/bin/hbase " + this.getClass().getName() + " -p /myhbase");
|
||||
System.out.println();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (cmd.hasOption("p")) {
|
||||
this.targetDirPath = new Path(FSUtils.getRootDir(getConf()), cmd.getOptionValue("p"));
|
||||
}
|
||||
try {
|
||||
if (cmd.hasOption("n")) {
|
||||
int n = Integer.parseInt(cmd.getOptionValue("n"));
|
||||
if (n < 0 || n > 100) {
|
||||
LOG.warn("Please use a positive number <= 100 for number of threads."
|
||||
+ " Continuing with default value " + DEFAULT_NUM_OF_THREADS);
|
||||
return true;
|
||||
}
|
||||
this.numOfThreads = n;
|
||||
}
|
||||
} catch (NumberFormatException nfe) {
|
||||
LOG.error("Please select a valid number for threads");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks for HFileV1.
|
||||
* @return 0 when no HFileV1 is present.
|
||||
* 1 when a HFileV1 is present or, when there is a file with corrupt major version
|
||||
* (neither V1 nor V2).
|
||||
* -1 in case of any error/exception
|
||||
*/
|
||||
@Override
|
||||
public int run(String args[]) throws IOException, ParseException {
|
||||
FSUtils.setFsDefault(getConf(), new Path(FSUtils.getRootDir(getConf()).toUri()));
|
||||
fs = FileSystem.get(getConf());
|
||||
numOfThreads = DEFAULT_NUM_OF_THREADS;
|
||||
targetDirPath = FSUtils.getRootDir(getConf());
|
||||
if (!parseOption(args)) {
|
||||
System.exit(-1);
|
||||
}
|
||||
this.exec = Executors.newFixedThreadPool(numOfThreads);
|
||||
try {
|
||||
return processResult(checkForV1Files(targetDirPath));
|
||||
} catch (Exception e) {
|
||||
LOG.error(e);
|
||||
} finally {
|
||||
exec.shutdown();
|
||||
fs.close();
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
private void setDefaultNamespaceDir() throws IOException {
|
||||
Path dataDir = new Path(FSUtils.getRootDir(getConf()), HConstants.BASE_NAMESPACE_DIR);
|
||||
defaultNamespace = new Path(dataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
|
||||
}
|
||||
|
||||
private int processResult(Set<Path> regionsWithHFileV1) {
|
||||
LOG.info("Result: \n");
|
||||
printSet(processedTables, "Tables Processed: ");
|
||||
|
||||
int count = hFileV1Set.size();
|
||||
LOG.info("Count of HFileV1: " + count);
|
||||
if (count > 0) printSet(hFileV1Set, "HFileV1:");
|
||||
|
||||
count = corruptedHFiles.size();
|
||||
LOG.info("Count of corrupted files: " + count);
|
||||
if (count > 0) printSet(corruptedHFiles, "Corrupted Files: ");
|
||||
|
||||
count = regionsWithHFileV1.size();
|
||||
LOG.info("Count of Regions with HFileV1: " + count);
|
||||
if (count > 0) printSet(regionsWithHFileV1, "Regions to Major Compact: ");
|
||||
|
||||
return (hFileV1Set.isEmpty() && corruptedHFiles.isEmpty()) ? 0 : 1;
|
||||
}
|
||||
|
||||
private void printSet(Set<Path> result, String msg) {
|
||||
LOG.info(msg);
|
||||
for (Path p : result) {
|
||||
LOG.info(p);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Takes a directory path, and lists out any HFileV1, if present.
|
||||
* @param targetDir directory to start looking for HFilev1.
|
||||
* @return set of Regions that have HFileV1
|
||||
* @throws IOException
|
||||
*/
|
||||
private Set<Path> checkForV1Files(Path targetDir) throws IOException {
|
||||
LOG.info("Target dir is: " + targetDir);
|
||||
if (!fs.exists(targetDir)) {
|
||||
throw new IOException("The given path does not exist: " + targetDir);
|
||||
}
|
||||
if (isTableDir(fs, targetDir)) {
|
||||
processedTables.add(targetDir);
|
||||
return processTable(targetDir);
|
||||
}
|
||||
Set<Path> regionsWithHFileV1 = new HashSet<Path>();
|
||||
FileStatus[] fsStats = fs.listStatus(targetDir);
|
||||
for (FileStatus fsStat : fsStats) {
|
||||
if (isTableDir(fs, fsStat.getPath()) && !isRootTable(fsStat.getPath())) {
|
||||
processedTables.add(fsStat.getPath());
|
||||
// look for regions and find out any v1 file.
|
||||
regionsWithHFileV1.addAll(processTable(fsStat.getPath()));
|
||||
} else {
|
||||
LOG.info("Ignoring path: " + fsStat.getPath());
|
||||
}
|
||||
}
|
||||
return regionsWithHFileV1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ignore ROOT table as it doesn't exist in 0.96.
|
||||
* @param path
|
||||
*/
|
||||
private boolean isRootTable(Path path) {
|
||||
if (path != null && path.toString().endsWith("-ROOT-")) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find out regions in the table which have HFileV1.
|
||||
* @param tableDir
|
||||
* @return the set of regions containing HFile v1.
|
||||
* @throws IOException
|
||||
*/
|
||||
private Set<Path> processTable(Path tableDir) throws IOException {
|
||||
// list out the regions and then process each file in it.
|
||||
LOG.debug("processing table: " + tableDir);
|
||||
List<Future<Path>> regionLevelResults = new ArrayList<Future<Path>>();
|
||||
Set<Path> regionsWithHFileV1 = new HashSet<Path>();
|
||||
|
||||
FileStatus[] fsStats = fs.listStatus(tableDir);
|
||||
for (FileStatus fsStat : fsStats) {
|
||||
// process each region
|
||||
if (isRegionDir(fs, fsStat.getPath())) {
|
||||
regionLevelResults.add(processRegion(fsStat.getPath()));
|
||||
}
|
||||
}
|
||||
for (Future<Path> f : regionLevelResults) {
|
||||
try {
|
||||
if (f.get() != null) {
|
||||
regionsWithHFileV1.add(f.get());
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
LOG.error(e);
|
||||
} catch (ExecutionException e) {
|
||||
LOG.error(e); // might be a bad hfile. We print it at the end.
|
||||
}
|
||||
}
|
||||
return regionsWithHFileV1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Each region is processed by a separate handler. If a HRegion has a hfileV1, its path is
|
||||
* returned as the future result, otherwise, a null value is returned.
|
||||
* @param regionDir Region to process.
|
||||
* @return corresponding Future object.
|
||||
*/
|
||||
private Future<Path> processRegion(final Path regionDir) {
|
||||
LOG.debug("processing region: " + regionDir);
|
||||
Callable<Path> regionCallable = new Callable<Path>() {
|
||||
@Override
|
||||
public Path call() throws Exception {
|
||||
for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
|
||||
FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
|
||||
if (storeFiles == null || storeFiles.length == 0) continue;
|
||||
for (FileStatus storeFile : storeFiles) {
|
||||
Path storeFilePath = storeFile.getPath();
|
||||
FSDataInputStream fsdis = null;
|
||||
long lenToRead = 0;
|
||||
try {
|
||||
// check whether this path is a reference.
|
||||
if (StoreFileInfo.isReference(storeFilePath)) continue;
|
||||
// check whether this path is a HFileLink.
|
||||
else if (HFileLink.isHFileLink(storeFilePath)) {
|
||||
FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
|
||||
fsdis = fLink.open(fs);
|
||||
lenToRead = fLink.getFileStatus(fs).getLen();
|
||||
} else {
|
||||
// a regular hfile
|
||||
fsdis = fs.open(storeFilePath);
|
||||
lenToRead = storeFile.getLen();
|
||||
}
|
||||
int majorVersion = computeMajorVersion(fsdis, lenToRead);
|
||||
if (majorVersion == 1) {
|
||||
hFileV1Set.add(storeFilePath);
|
||||
// return this region path, as it needs to be compacted.
|
||||
return regionDir;
|
||||
}
|
||||
if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
|
||||
"Incorrect major version: " + majorVersion);
|
||||
} catch (Exception iae) {
|
||||
corruptedHFiles.add(storeFilePath);
|
||||
LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
|
||||
} finally {
|
||||
if (fsdis != null) fsdis.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private int computeMajorVersion(FSDataInputStream istream, long fileSize)
|
||||
throws IOException {
|
||||
//read up the last int of the file. Major version is in the last 3 bytes.
|
||||
long seekPoint = fileSize - Bytes.SIZEOF_INT;
|
||||
if (seekPoint < 0)
|
||||
throw new IllegalArgumentException("File too small, no major version found");
|
||||
|
||||
// Read the version from the last int of the file.
|
||||
istream.seek(seekPoint);
|
||||
int version = istream.readInt();
|
||||
// Extract and return the major version
|
||||
return version & 0x00ffffff;
|
||||
}
|
||||
};
|
||||
Future<Path> f = exec.submit(regionCallable);
|
||||
return f;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a FileLink which adds pre-namespace paths in its list of available paths. This is used
|
||||
* when reading a snapshot file in a pre-namespace file layout, for example, while upgrading.
|
||||
* @param storeFilePath
|
||||
* @return a FileLink which could read from pre-namespace paths.
|
||||
* @throws IOException
|
||||
*/
|
||||
public FileLink getFileLinkWithPreNSPath(Path storeFilePath) throws IOException {
|
||||
HFileLink link = HFileLink.buildFromHFileLinkPattern(getConf(), storeFilePath);
|
||||
List<Path> pathsToProcess = getPreNSPathsForHFileLink(link);
|
||||
pathsToProcess.addAll(Arrays.asList(link.getLocations()));
|
||||
return new FileLink(pathsToProcess);
|
||||
}
|
||||
|
||||
private List<Path> getPreNSPathsForHFileLink(HFileLink fileLink) throws IOException {
|
||||
if (defaultNamespace == null) setDefaultNamespaceDir();
|
||||
List<Path> p = new ArrayList<Path>();
|
||||
String relativeTablePath = removeDefaultNSPath(fileLink.getOriginPath());
|
||||
p.add(getPreNSPath(PRE_NS_DOT_ARCHIVE, relativeTablePath));
|
||||
p.add(getPreNSPath(PRE_NS_DOT_TMP, relativeTablePath));
|
||||
p.add(getPreNSPath(null, relativeTablePath));
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes the prefix of defaultNamespace from the path.
|
||||
* @param originalPath
|
||||
*/
|
||||
private String removeDefaultNSPath(Path originalPath) {
|
||||
String pathStr = originalPath.toString();
|
||||
if (!pathStr.startsWith(defaultNamespace.toString())) return pathStr;
|
||||
return pathStr.substring(defaultNamespace.toString().length() + 1);
|
||||
}
|
||||
|
||||
private Path getPreNSPath(String prefix, String relativeTablePath) throws IOException {
|
||||
String relativePath = (prefix == null ? relativeTablePath : prefix + Path.SEPARATOR
|
||||
+ relativeTablePath);
|
||||
return new Path(FSUtils.getRootDir(getConf()), relativePath);
|
||||
}
|
||||
|
||||
private static boolean isTableDir(final FileSystem fs, final Path path) throws IOException {
|
||||
// check for old format, of having /table/.tableinfo; hbase:meta doesn't has .tableinfo,
|
||||
// include it.
|
||||
if (fs.isFile(path)) return false;
|
||||
return (FSTableDescriptors.getTableInfoPath(fs, path) != null || FSTableDescriptors
|
||||
.getCurrentTableInfoStatus(fs, path, false) != null) || path.toString().endsWith(".META.");
|
||||
}
|
||||
|
||||
private static boolean isRegionDir(final FileSystem fs, final Path path) throws IOException {
|
||||
if (fs.isFile(path)) return false;
|
||||
Path regionInfo = new Path(path, HRegionFileSystem.REGION_INFO_FILE);
|
||||
return fs.exists(regionInfo);
|
||||
|
||||
}
|
||||
|
||||
public static void main(String args[]) throws Exception {
|
||||
System.exit(ToolRunner.run(HBaseConfiguration.create(), new HFileV1Detector(), args));
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,354 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.commons.math.random.RandomData;
|
||||
import org.apache.commons.math.random.RandomDataImpl;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
|
||||
import org.apache.hadoop.io.MapFile;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.io.WritableComparable;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* This class runs performance benchmarks for {@link MapFile}.
|
||||
* </p>
|
||||
* @deprecated HBase does not use MapFiles any more.
|
||||
*/
|
||||
@Deprecated
|
||||
public class MapFilePerformanceEvaluation {
|
||||
protected final Configuration conf;
|
||||
private static final int ROW_LENGTH = 10;
|
||||
private static final int ROW_COUNT = 100000;
|
||||
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(MapFilePerformanceEvaluation.class.getName());
|
||||
|
||||
/**
|
||||
* @param c
|
||||
*/
|
||||
public MapFilePerformanceEvaluation(final Configuration c) {
|
||||
super();
|
||||
this.conf = c;
|
||||
}
|
||||
|
||||
static ImmutableBytesWritable format(final int i, ImmutableBytesWritable w) {
|
||||
String v = Integer.toString(i);
|
||||
w.set(Bytes.toBytes("0000000000".substring(v.length()) + v));
|
||||
return w;
|
||||
}
|
||||
|
||||
private void runBenchmarks() throws Exception {
|
||||
final FileSystem fs = FileSystem.get(this.conf);
|
||||
final Path mf = fs.makeQualified(new Path("performanceevaluation.mapfile"));
|
||||
if (fs.exists(mf)) {
|
||||
fs.delete(mf, true);
|
||||
}
|
||||
runBenchmark(new SequentialWriteBenchmark(conf, fs, mf, ROW_COUNT),
|
||||
ROW_COUNT);
|
||||
|
||||
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
runBenchmark(new UniformRandomSmallScan(conf, fs, mf, ROW_COUNT),
|
||||
ROW_COUNT);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
});
|
||||
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
runBenchmark(new UniformRandomReadBenchmark(conf, fs, mf, ROW_COUNT),
|
||||
ROW_COUNT);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
});
|
||||
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
runBenchmark(new GaussianRandomReadBenchmark(conf, fs, mf, ROW_COUNT),
|
||||
ROW_COUNT);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
});
|
||||
PerformanceEvaluationCommons.concurrentReads(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
runBenchmark(new SequentialReadBenchmark(conf, fs, mf, ROW_COUNT),
|
||||
ROW_COUNT);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
protected void runBenchmark(RowOrientedBenchmark benchmark, int rowCount)
|
||||
throws Exception {
|
||||
LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " +
|
||||
rowCount + " rows.");
|
||||
long elapsedTime = benchmark.run();
|
||||
LOG.info("Running " + benchmark.getClass().getSimpleName() + " for " +
|
||||
rowCount + " rows took " + elapsedTime + "ms.");
|
||||
}
|
||||
|
||||
static abstract class RowOrientedBenchmark {
|
||||
|
||||
protected final Configuration conf;
|
||||
protected final FileSystem fs;
|
||||
protected final Path mf;
|
||||
protected final int totalRows;
|
||||
|
||||
public RowOrientedBenchmark(Configuration conf, FileSystem fs, Path mf,
|
||||
int totalRows) {
|
||||
this.conf = conf;
|
||||
this.fs = fs;
|
||||
this.mf = mf;
|
||||
this.totalRows = totalRows;
|
||||
}
|
||||
|
||||
void setUp() throws Exception {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
abstract void doRow(int i) throws Exception;
|
||||
|
||||
protected int getReportingPeriod() {
|
||||
return this.totalRows / 10;
|
||||
}
|
||||
|
||||
void tearDown() throws Exception {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
/**
|
||||
* Run benchmark
|
||||
* @return elapsed time.
|
||||
* @throws Exception
|
||||
*/
|
||||
long run() throws Exception {
|
||||
long elapsedTime;
|
||||
setUp();
|
||||
long startTime = System.currentTimeMillis();
|
||||
try {
|
||||
for (int i = 0; i < totalRows; i++) {
|
||||
if (i > 0 && i % getReportingPeriod() == 0) {
|
||||
LOG.info("Processed " + i + " rows.");
|
||||
}
|
||||
doRow(i);
|
||||
}
|
||||
elapsedTime = System.currentTimeMillis() - startTime;
|
||||
} finally {
|
||||
tearDown();
|
||||
}
|
||||
return elapsedTime;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class SequentialWriteBenchmark extends RowOrientedBenchmark {
|
||||
|
||||
protected MapFile.Writer writer;
|
||||
private Random random = new Random();
|
||||
private byte[] bytes = new byte[ROW_LENGTH];
|
||||
private ImmutableBytesWritable key = new ImmutableBytesWritable();
|
||||
private ImmutableBytesWritable value = new ImmutableBytesWritable();
|
||||
|
||||
public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf,
|
||||
int totalRows) {
|
||||
super(conf, fs, mf, totalRows);
|
||||
}
|
||||
|
||||
@Override
|
||||
void setUp() throws Exception {
|
||||
writer = new MapFile.Writer(conf, fs, mf.toString(),
|
||||
ImmutableBytesWritable.class, ImmutableBytesWritable.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
void doRow(int i) throws Exception {
|
||||
value.set(generateValue());
|
||||
writer.append(format(i, key), value);
|
||||
}
|
||||
|
||||
private byte[] generateValue() {
|
||||
random.nextBytes(bytes);
|
||||
return bytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getReportingPeriod() {
|
||||
return this.totalRows; // don't report progress
|
||||
}
|
||||
|
||||
@Override
|
||||
void tearDown() throws Exception {
|
||||
writer.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static abstract class ReadBenchmark extends RowOrientedBenchmark {
|
||||
ImmutableBytesWritable key = new ImmutableBytesWritable();
|
||||
ImmutableBytesWritable value = new ImmutableBytesWritable();
|
||||
|
||||
protected MapFile.Reader reader;
|
||||
|
||||
public ReadBenchmark(Configuration conf, FileSystem fs, Path mf,
|
||||
int totalRows) {
|
||||
super(conf, fs, mf, totalRows);
|
||||
}
|
||||
|
||||
@Override
|
||||
void setUp() throws Exception {
|
||||
reader = new MapFile.Reader(fs, mf.toString(), conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
void tearDown() throws Exception {
|
||||
reader.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class SequentialReadBenchmark extends ReadBenchmark {
|
||||
ImmutableBytesWritable verify = new ImmutableBytesWritable();
|
||||
|
||||
public SequentialReadBenchmark(Configuration conf, FileSystem fs,
|
||||
Path mf, int totalRows) {
|
||||
super(conf, fs, mf, totalRows);
|
||||
}
|
||||
|
||||
@Override
|
||||
void doRow(int i) throws Exception {
|
||||
this.reader.next(key, value);
|
||||
PerformanceEvaluationCommons.assertKey(this.key.get(),
|
||||
format(i, this.verify).get());
|
||||
PerformanceEvaluationCommons.assertValueSize(ROW_LENGTH, value.getLength());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getReportingPeriod() {
|
||||
return this.totalRows; // don't report progress
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class UniformRandomReadBenchmark extends ReadBenchmark {
|
||||
|
||||
private Random random = new Random();
|
||||
|
||||
public UniformRandomReadBenchmark(Configuration conf, FileSystem fs,
|
||||
Path mf, int totalRows) {
|
||||
super(conf, fs, mf, totalRows);
|
||||
}
|
||||
|
||||
@Override
|
||||
void doRow(int i) throws Exception {
|
||||
ImmutableBytesWritable k = getRandomRow();
|
||||
ImmutableBytesWritable r = (ImmutableBytesWritable)reader.get(k, value);
|
||||
PerformanceEvaluationCommons.assertValueSize(r.getLength(), ROW_LENGTH);
|
||||
}
|
||||
|
||||
private ImmutableBytesWritable getRandomRow() {
|
||||
return format(random.nextInt(totalRows), key);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class UniformRandomSmallScan extends ReadBenchmark {
|
||||
private Random random = new Random();
|
||||
|
||||
public UniformRandomSmallScan(Configuration conf, FileSystem fs,
|
||||
Path mf, int totalRows) {
|
||||
super(conf, fs, mf, totalRows/10);
|
||||
}
|
||||
|
||||
@Override
|
||||
void doRow(int i) throws Exception {
|
||||
ImmutableBytesWritable ibw = getRandomRow();
|
||||
WritableComparable<?> wc = this.reader.getClosest(ibw, this.value);
|
||||
if (wc == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
PerformanceEvaluationCommons.assertKey(ibw.get(),
|
||||
((ImmutableBytesWritable)wc).get());
|
||||
// TODO: Verify we're getting right values.
|
||||
for (int ii = 0; ii < 29; ii++) {
|
||||
this.reader.next(this.key, this.value);
|
||||
PerformanceEvaluationCommons.assertValueSize(this.value.getLength(), ROW_LENGTH);
|
||||
}
|
||||
}
|
||||
|
||||
private ImmutableBytesWritable getRandomRow() {
|
||||
return format(random.nextInt(totalRows), key);
|
||||
}
|
||||
}
|
||||
|
||||
static class GaussianRandomReadBenchmark extends ReadBenchmark {
|
||||
private RandomData randomData = new RandomDataImpl();
|
||||
|
||||
public GaussianRandomReadBenchmark(Configuration conf, FileSystem fs,
|
||||
Path mf, int totalRows) {
|
||||
super(conf, fs, mf, totalRows);
|
||||
}
|
||||
|
||||
@Override
|
||||
void doRow(int i) throws Exception {
|
||||
ImmutableBytesWritable k = getGaussianRandomRow();
|
||||
ImmutableBytesWritable r = (ImmutableBytesWritable)reader.get(k, value);
|
||||
PerformanceEvaluationCommons.assertValueSize(r.getLength(), ROW_LENGTH);
|
||||
}
|
||||
|
||||
private ImmutableBytesWritable getGaussianRandomRow() {
|
||||
int r = (int) randomData.nextGaussian((double)totalRows / 2.0,
|
||||
(double)totalRows / 10.0);
|
||||
return format(r, key);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* @param args
|
||||
* @throws Exception
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void main(String[] args) throws Exception {
|
||||
new MapFilePerformanceEvaluation(HBaseConfiguration.create()).
|
||||
runBenchmarks();
|
||||
}
|
||||
}
|
|
@@ -1613,15 +1613,6 @@ public class PerformanceEvaluation extends Configured implements Tool {
    return b;
  }

-  /**
-   * @deprecated Use {@link #generateData(java.util.Random, int)} instead.
-   * @return Generated random value to insert into a table cell.
-   */
-  @Deprecated
-  public static byte[] generateValue(final Random r) {
-    return generateData(r, DEFAULT_VALUE_LENGTH);
-  }
-
  static byte [] getRandomRow(final Random random, final int totalRows) {
    return format(random.nextInt(Integer.MAX_VALUE) % totalRows);
  }
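Test code that called generateValue(Random) now passes the desired value length explicitly, per the removed deprecation note. A sketch, assuming generateData(Random, int) is accessible to the caller; the wrapper class and method are illustrative:

import java.util.Random;
import org.apache.hadoop.hbase.PerformanceEvaluation;

public class ValueGeneration {
  // The removed generateValue(Random) simply delegated with a default length;
  // callers now choose the length themselves.
  static byte[] newCellValue(Random rng, int length) {
    return PerformanceEvaluation.generateData(rng, length);
  }
}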
@@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CodecPerformance {
-  /** @deprecated LOG variable would be made private. */
+  /** @deprecated LOG variable would be made private. since 1.2, remove in 3.0 */
  @Deprecated
  public static final Log LOG = LogFactory.getLog(CodecPerformance.class);

File diff suppressed because it is too large