HBASE-2233 Support both Hadoop 0.20 and 0.22

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1134089 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-06-09 21:27:41 +00:00
parent bfdceb5117
commit 7cd5403b30
18 changed files with 304 additions and 163 deletions

File: CHANGES.txt

@@ -255,7 +255,6 @@ Release 0.91.0 - Unreleased
    hbase-1502
    HBASE-3071  Graceful decommissioning of a regionserver
 NEW FEATURES
    HBASE-2001  Coprocessors: Colocate user code with regions (Mingjie Lai via
                Andrew Purtell)
@@ -285,6 +284,7 @@ Release 0.91.0 - Unreleased
                RS web UIs
    HBASE-3691  Add compressor support for 'snappy', google's compressor
                (Nichole Treadway and Nicholas Telford)
+   HBASE-2233  Support both Hadoop 0.20 and 0.22
 Release 0.90.4 - Unreleased

File: ConnectionHeader.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.security.UserGroupInformation;
  */
 class ConnectionHeader implements Writable {
   private String protocol;
-  private UserGroupInformation ugi = null;
   public ConnectionHeader() {}
@@ -47,7 +46,6 @@ class ConnectionHeader implements Writable {
    */
   public ConnectionHeader(String protocol, UserGroupInformation ugi) {
     this.protocol = protocol;
-    this.ugi = ugi;
   }
   @Override
@@ -56,26 +54,11 @@ class ConnectionHeader implements Writable {
     if (protocol.isEmpty()) {
       protocol = null;
     }
-    boolean ugiUsernamePresent = in.readBoolean();
-    if (ugiUsernamePresent) {
-      String username = in.readUTF();
-      ugi.readFields(in);
-    } else {
-      ugi = null;
-    }
   }
   @Override
   public void write(DataOutput out) throws IOException {
     Text.writeString(out, (protocol == null) ? "" : protocol);
-    if (ugi != null) {
-      //Send both effective user and real user for simple auth
-      out.writeBoolean(true);
-      out.writeUTF(ugi.getUserName());
-    } else {
-      out.writeBoolean(false);
-    }
   }
   public String getProtocol() {
@@ -83,10 +66,10 @@ class ConnectionHeader implements Writable {
   }
   public UserGroupInformation getUgi() {
-    return ugi;
+    return null;
   }
   public String toString() {
-    return protocol + "-" + ugi;
+    return protocol;
   }
 }

File: HFileOutputFormat.java

@@ -45,12 +45,12 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Partitioner;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
@@ -271,9 +271,16 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
    * The user should be sure to set the map output value class to either KeyValue or Put before
    * running this function.
    */
-  public static void configureIncrementalLoad(Job job, HTable table) throws IOException {
+  public static void configureIncrementalLoad(Job job, HTable table)
+  throws IOException {
     Configuration conf = job.getConfiguration();
-    job.setPartitionerClass(TotalOrderPartitioner.class);
+    Class<? extends Partitioner> topClass;
+    try {
+      topClass = getTotalOrderPartitionerClass();
+    } catch (ClassNotFoundException e) {
+      throw new IOException("Failed getting TotalOrderPartitioner", e);
+    }
+    job.setPartitionerClass(topClass);
     job.setOutputKeyClass(ImmutableBytesWritable.class);
     job.setOutputValueClass(KeyValue.class);
     job.setOutputFormatClass(HFileOutputFormat.class);
@@ -302,10 +309,14 @@
     FileSystem fs = partitionsPath.getFileSystem(conf);
     writePartitions(conf, partitionsPath, startKeys);
     partitionsPath.makeQualified(fs);
     URI cacheUri;
     try {
+      // Below we make explicit reference to the bundled TOP. Its cheating.
+      // We are assume the define in the hbase bundled TOP is as it is in
+      // hadoop (whether 0.20 or 0.22, etc.)
       cacheUri = new URI(partitionsPath.toString() + "#" +
-        TotalOrderPartitioner.DEFAULT_PATH);
+        org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner.DEFAULT_PATH);
     } catch (URISyntaxException e) {
       throw new IOException(e);
     }
@@ -318,6 +329,27 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
     LOG.info("Incremental table output configured.");
   }
+  /**
+   * If > hadoop 0.20, then we want to use the hadoop TotalOrderPartitioner.
+   * If 0.20, then we want to use the TOP that we have under hadoopbackport.
+   * This method is about hbase being able to run on different versions of
+   * hadoop.  In 0.20.x hadoops, we have to use the TOP that is bundled with
+   * hbase.  Otherwise, we use the one in Hadoop.
+   * @return Instance of the TotalOrderPartitioner class
+   * @throws ClassNotFoundException If can't find a TotalOrderPartitioner.
+   */
+  private static Class<? extends Partitioner> getTotalOrderPartitionerClass()
+  throws ClassNotFoundException {
+    Class<? extends Partitioner> clazz = null;
+    try {
+      clazz = (Class<? extends Partitioner>) Class.forName("org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner");
+    } catch (ClassNotFoundException e) {
+      clazz =
+        (Class<? extends Partitioner>) Class.forName("org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner");
+    }
+    return clazz;
+  }
   /**
    * Run inside the task to deserialize column family to compression algorithm
    * map from the
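The core of the 0.20/0.22 support in this file is the "prefer the class shipped with the running Hadoop, fall back to the copy bundled under hadoopbackport" lookup. A minimal standalone sketch of the same idea, under the assumption that only the two TotalOrderPartitioner class names come from the change above (the helper class and method names here are illustrative, not HBase API):

    import org.apache.hadoop.mapreduce.Partitioner;

    public final class PartitionerResolver {
      private PartitionerResolver() {}

      @SuppressWarnings("unchecked")
      public static Class<? extends Partitioner> totalOrderPartitioner()
          throws ClassNotFoundException {
        try {
          // Hadoop 0.21/0.22 ship TotalOrderPartitioner in mapreduce itself.
          return (Class<? extends Partitioner>) Class.forName(
              "org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner");
        } catch (ClassNotFoundException e) {
          // Hadoop 0.20.x does not, so fall back to the copy bundled with HBase.
          return (Class<? extends Partitioner>) Class.forName(
              "org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner");
        }
      }
    }

Whatever class the lookup returns is then handed to Job.setPartitionerClass(), which is exactly what configureIncrementalLoad() does above.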

File: InputSampler.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.mapreduce.hadoopbackport;
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -132,8 +133,7 @@ public class InputSampler<K,V> extends Configured implements Tool {
       int samplesPerSplit = numSamples / splitsToSample;
       long records = 0;
       for (int i = 0; i < splitsToSample; ++i) {
-        TaskAttemptContext samplingContext = new TaskAttemptContext(
-          job.getConfiguration(), new TaskAttemptID());
+        TaskAttemptContext samplingContext = getTaskAttemptContext(job);
         RecordReader<K,V> reader = inf.createRecordReader(
           splits.get(i), samplingContext);
         reader.initialize(splits.get(i), samplingContext);
@@ -151,6 +151,32 @@ public class InputSampler<K,V> extends Configured implements Tool {
       }
     }
+  /**
+   * This method is about making hbase portable, making it so it can run on
+   * more than just hadoop 0.20.  In later hadoops, TaskAttemptContext became
+   * an Interface.  But in hadoops where TAC is an Interface, we shouldn't
+   * be using the classes that are in this package; we should be using the
+   * native Hadoop ones (We'll throw a ClassNotFoundException if end up in
+   * here when we should be using native hadoop TotalOrderPartitioner).
+   * @param job
+   * @return
+   * @throws IOException
+   */
+  public static TaskAttemptContext getTaskAttemptContext(final Job job)
+  throws IOException {
+    Constructor<TaskAttemptContext> c;
+    try {
+      c = TaskAttemptContext.class.getConstructor(Configuration.class, TaskAttemptID.class);
+    } catch (Exception e) {
+      throw new IOException("Failed getting constructor", e);
+    }
+    try {
+      return c.newInstance(job.getConfiguration(), new TaskAttemptID());
+    } catch (Exception e) {
+      throw new IOException("Failed creating instance", e);
+    }
+  }
   /**
    * Sample from random points in the input.
    * General-purpose sampler.  Takes numSamples / maxSplitsSampled inputs from
@@ -214,8 +240,7 @@ public class InputSampler<K,V> extends Configured implements Tool {
       // the target sample keyset
       for (int i = 0; i < splitsToSample ||
           (i < splits.size() && samples.size() < numSamples); ++i) {
-        TaskAttemptContext samplingContext = new TaskAttemptContext(
-          job.getConfiguration(), new TaskAttemptID());
+        TaskAttemptContext samplingContext = getTaskAttemptContext(job);
         RecordReader<K,V> reader = inf.createRecordReader(
           splits.get(i), samplingContext);
         reader.initialize(splits.get(i), samplingContext);
@@ -285,8 +310,7 @@ public class InputSampler<K,V> extends Configured implements Tool {
       long records = 0;
       long kept = 0;
       for (int i = 0; i < splitsToSample; ++i) {
-        TaskAttemptContext samplingContext = new TaskAttemptContext(
-          job.getConfiguration(), new TaskAttemptID());
+        TaskAttemptContext samplingContext = getTaskAttemptContext(job);
         RecordReader<K,V> reader = inf.createRecordReader(
           splits.get(i), samplingContext);
         reader.initialize(splits.get(i), samplingContext);
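The new getTaskAttemptContext() helper only works where TaskAttemptContext is still a concrete class with a (Configuration, TaskAttemptID) constructor, i.e. on 0.20.x; on later Hadoops it became an interface, so the reflective constructor lookup fails and, per the javadoc above, this backported sampler should not be in use at all. A hedged sketch of that distinction (the class/interface test and the constructor call are the only parts taken from the change; the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;

    public final class TaskAttemptContexts {
      private TaskAttemptContexts() {}

      public static TaskAttemptContext newSamplingContext(Configuration conf)
          throws Exception {
        if (TaskAttemptContext.class.isInterface()) {
          // Post-0.20 Hadoop: TaskAttemptContext is an interface, so the
          // backported sampler should not be running; fail rather than guess.
          throw new IllegalStateException(
              "TaskAttemptContext is an interface; use the native Hadoop sampler");
        }
        // 0.20.x: TaskAttemptContext is a class with this constructor.
        return TaskAttemptContext.class
            .getConstructor(Configuration.class, TaskAttemptID.class)
            .newInstance(conf, new TaskAttemptID());
      }
    }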

File: CatalogJanitor.java

@@ -257,6 +257,7 @@ class CatalogJanitor extends Chore {
     for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
       Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
         family.getName());
+      if (!fs.exists(p)) continue;
       // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
       FileStatus [] ps = fs.listStatus(p,
           new PathFilter () {

File: HMaster.java

@@ -595,7 +595,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     int port = this.conf.getInt("hbase.master.info.port", 60010);
     if (port >= 0) {
       String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
-      this.infoServer = new InfoServer(MASTER, a, port, false);
+      this.infoServer = new InfoServer(MASTER, a, port, false, this.conf);
       this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
       this.infoServer.setAttribute(MASTER, this);
       this.infoServer.start();

File: ServerManager.java

@@ -357,7 +357,7 @@ public class ServerManager {
     boolean carryingRoot;
     try {
       ServerName address = ct.getRootLocation();
-      carryingRoot = address.equals(serverName);
+      carryingRoot = address != null && address.equals(serverName);
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       LOG.info("Interrupted");
@@ -369,7 +369,7 @@
     // has an inmemory list of who has what.  This list will be cleared as we
     // process the dead server but should be find asking it now.
     ServerName address = ct.getMetaLocation();
-    boolean carryingMeta = address.equals(serverName);
+    boolean carryingMeta = address != null && address.equals(serverName);
     if (carryingRoot || carryingMeta) {
       this.services.getExecutorService().submit(new MetaServerShutdownHandler(this.master,
         this.services, this.deadservers, serverName, carryingRoot, carryingMeta));

File: HRegionServer.java

@@ -1288,7 +1288,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
     boolean auto = this.conf.getBoolean("hbase.regionserver.info.port.auto", false);
     while (true) {
       try {
-        this.infoServer = new InfoServer("regionserver", addr, port, false);
+        this.infoServer = new InfoServer("regionserver", addr, port, false, this.conf);
         this.infoServer.addServlet("status", "/rs-status", RSStatusServlet.class);
         this.infoServer.setAttribute(REGIONSERVER, this);
         this.infoServer.start();

File: HLog.java

@@ -48,6 +48,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -139,7 +140,7 @@ public class HLog implements Syncable {
     HLog.logReaderClass = null;
   }
-  private OutputStream hdfs_out;       // OutputStream associated with the current SequenceFile.writer
+  private FSDataOutputStream hdfs_out; // FSDataOutputStream associated with the current SequenceFile.writer
   private int initialReplication;      // initial replication factor of SequenceFile.writer
   private Method getNumCurrentReplicas; // refers to DFSOutputStream.getNumCurrentReplicas
   final static Object [] NO_ARGS = new Object []{};
@@ -368,33 +369,42 @@
     rollWriter();
     // handle the reflection necessary to call getNumCurrentReplicas()
-    this.getNumCurrentReplicas = null;
+    this.getNumCurrentReplicas = getGetNumCurrentReplicas(this.hdfs_out);
+    logSyncerThread = new LogSyncer(this.optionalFlushInterval);
+    Threads.setDaemonThreadRunning(logSyncerThread,
+      Thread.currentThread().getName() + ".logSyncer");
+    coprocessorHost = new WALCoprocessorHost(this, conf);
+  }
+  /**
+   * Find the 'getNumCurrentReplicas' on the passed <code>os</code> stream.
+   * @return Method or null.
+   */
+  private Method getGetNumCurrentReplicas(final FSDataOutputStream os) {
+    Method m = null;
     Exception exception = null;
-    if (this.hdfs_out != null) {
+    if (os != null) {
       try {
-        this.getNumCurrentReplicas = this.hdfs_out.getClass().
+        m = os.getWrappedStream().getClass().
           getMethod("getNumCurrentReplicas", new Class<?> []{});
-        this.getNumCurrentReplicas.setAccessible(true);
+        m.setAccessible(true);
       } catch (NoSuchMethodException e) {
         // Thrown if getNumCurrentReplicas() function isn't available
         exception = e;
       } catch (SecurityException e) {
         // Thrown if we can't get access to getNumCurrentReplicas()
         exception = e;
-        this.getNumCurrentReplicas = null; // could happen on setAccessible()
+        m = null; // could happen on setAccessible()
       }
     }
-    if (this.getNumCurrentReplicas != null) {
+    if (m != null) {
       LOG.info("Using getNumCurrentReplicas--HDFS-826");
     } else {
       LOG.info("getNumCurrentReplicas--HDFS-826 not available; hdfs_out=" +
-        this.hdfs_out + ", exception=" + exception.getMessage());
+        os + ", exception=" + exception.getMessage());
     }
+    return m;
-    logSyncerThread = new LogSyncer(this.optionalFlushInterval);
-    Threads.setDaemonThreadRunning(logSyncerThread,
-      Thread.currentThread().getName() + ".logSyncer");
-    coprocessorHost = new WALCoprocessorHost(this, conf);
   }
   public void registerWALActionsListener (final WALObserver listener) {
@@ -436,9 +446,15 @@
     return logSeqNum.get();
   }
+  /**
+   * Method used internal to this class and for tests only.
+   * @return The wrapped stream our writer is using; its not the
+   * writer's 'out' FSDatoOutputStream but the stream that this 'out' wraps
+   * (In hdfs its an instance of DFSDataOutputStream).
+   */
   // usage: see TestLogRolling.java
   OutputStream getOutputStream() {
-    return this.hdfs_out;
+    return this.hdfs_out.getWrappedStream();
   }
   /**
@@ -482,10 +498,9 @@
     // Can we get at the dfsclient outputstream?  If an instance of
     // SFLW, it'll have done the necessary reflection to get at the
     // protected field name.
-    OutputStream nextHdfsOut = null;
+    FSDataOutputStream nextHdfsOut = null;
     if (nextWriter instanceof SequenceFileLogWriter) {
-      nextHdfsOut =
-        ((SequenceFileLogWriter)nextWriter).getDFSCOutputStream();
+      nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
     }
     // Tell our listeners that a new log was created
     if (!this.listeners.isEmpty()) {
@@ -768,6 +783,7 @@
    */
   public void closeAndDelete() throws IOException {
     close();
+    if (!fs.exists(this.dir)) return;
     FileStatus[] files = fs.listStatus(this.dir);
     for(FileStatus file : files) {
       Path p = getHLogArchivePath(this.oldLogDir, file.getPath());
@@ -966,8 +982,7 @@
     }
   }
-  @Override
-  public void sync() throws IOException {
+  private void syncer() throws IOException {
     synchronized (this.updateLock) {
       if (this.closed) {
         return;
@@ -1027,9 +1042,10 @@
    *
    * @throws Exception
    */
-  int getLogReplication() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
-    if(this.getNumCurrentReplicas != null && this.hdfs_out != null) {
-      Object repl = this.getNumCurrentReplicas.invoke(this.hdfs_out, NO_ARGS);
+  int getLogReplication()
+  throws IllegalArgumentException, IllegalAccessException, InvocationTargetException {
+    if (this.getNumCurrentReplicas != null && this.hdfs_out != null) {
+      Object repl = this.getNumCurrentReplicas.invoke(getOutputStream(), NO_ARGS);
       if (repl instanceof Integer) {
         return ((Integer)repl).intValue();
       }
@@ -1042,8 +1058,15 @@
   }
   public void hsync() throws IOException {
-    // Not yet implemented up in hdfs so just call hflush.
-    sync();
+    syncer();
+  }
+  public void hflush() throws IOException {
+    syncer();
+  }
+  public void sync() throws IOException {
+    syncer();
   }
   private void requestLogRoll() {
@@ -1309,7 +1332,9 @@
   public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem fs,
       final Path regiondir)
   throws IOException {
+    NavigableSet<Path> filesSorted = new TreeSet<Path>();
     Path editsdir = getRegionDirRecoveredEditsDir(regiondir);
+    if (!fs.exists(editsdir)) return filesSorted;
     FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {
       @Override
       public boolean accept(Path p) {
@@ -1327,7 +1352,6 @@
         return result;
       }
     });
-    NavigableSet<Path> filesSorted = new TreeSet<Path>();
     if (files == null) return filesSorted;
     for (FileStatus status: files) {
       filesSorted.add(status.getPath());
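The HLog changes above hinge on one probe: whether the stream wrapped by the writer's FSDataOutputStream exposes getNumCurrentReplicas() (HDFS-826), the method that getLogReplication() later invokes through getOutputStream(). A minimal sketch of that probe in isolation (the method name and the wrapped-stream lookup come from the change; the helper class is illustrative):

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FSDataOutputStream;

    public final class ReplicasProbe {
      private ReplicasProbe() {}

      /** @return the getNumCurrentReplicas method, or null if this hdfs lacks HDFS-826. */
      public static Method find(FSDataOutputStream os) {
        if (os == null) return null;
        try {
          Method m = os.getWrappedStream().getClass()
              .getMethod("getNumCurrentReplicas", new Class<?>[] {});
          m.setAccessible(true);
          return m;
        } catch (NoSuchMethodException e) {
          return null;   // method not present on this output stream
        } catch (SecurityException e) {
          return null;   // getMethod/setAccessible refused
        }
      }
    }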

File: HLogSplitter.java

@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.regionserver.wal;
 import static org.apache.hadoop.hbase.util.FSUtils.recoverFileLease;
 import java.io.EOFException;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -44,24 +43,23 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.master.SplitLogManager.TaskFinisher.Status;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Writer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
+import org.apache.hadoop.io.MultipleIOException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -530,7 +528,7 @@ public class HLogSplitter {
   private static List<FileStatus> listAll(FileSystem fs, Path dir)
   throws IOException {
     List<FileStatus> fset = new ArrayList<FileStatus>(100);
-    FileStatus [] files = fs.listStatus(dir);
+    FileStatus [] files = fs.exists(dir)? fs.listStatus(dir): null;
     if (files != null) {
       for (FileStatus f : files) {
         if (f.isDir()) {

File: SequenceFileLogWriter.java

@@ -43,13 +43,15 @@ public class SequenceFileLogWriter implements HLog.Writer {
   private final Log LOG = LogFactory.getLog(this.getClass());
   // The sequence file we delegate to.
   private SequenceFile.Writer writer;
-  // The dfsclient out stream gotten made accessible or null if not available.
-  private OutputStream dfsClient_out;
-  // The syncFs method from hdfs-200 or null if not available.
-  private Method syncFs;
+  // This is the FSDataOutputStream instance that is the 'out' instance
+  // in the SequenceFile.Writer 'writer' instance above.
+  private FSDataOutputStream writer_out;
   private Class<? extends HLogKey> keyClass;
+  private Method syncFs = null;
+  private Method hflush = null;
   /**
    * Default constructor.
    */
@@ -88,9 +90,62 @@
       null,
       new Metadata());
+    this.writer_out = getSequenceFilePrivateFSDataOutputStreamAccessible();
+    this.syncFs = getSyncFs();
+    this.hflush = getHFlush();
+    String msg =
+      "syncFs=" + (this.syncFs != null) + ", hflush=" + (this.hflush != null);
+    if (this.syncFs != null || this.hflush != null) {
+      LOG.debug(msg);
+    } else {
+      LOG.warn("No sync support! " + msg);
+    }
+  }
+  /**
+   * Now do dirty work to see if syncFs is available on the backing this.writer.
+   * It will be available in branch-0.20-append and in CDH3.
+   * @return The syncFs method or null if not available.
+   * @throws IOException
+   */
+  private Method getSyncFs()
+  throws IOException {
+    Method m = null;
+    try {
+      // function pointer to writer.syncFs() method; present when sync is hdfs-200.
+      m = this.writer.getClass().getMethod("syncFs", new Class<?> []{});
+    } catch (SecurityException e) {
+      throw new IOException("Failed test for syncfs", e);
+    } catch (NoSuchMethodException e) {
+      // Not available
+    }
+    return m;
+  }
+  /**
+   * See if hflush (0.21 and 0.22 hadoop) is available.
+   * @return The hflush method or null if not available.
+   * @throws IOException
+   */
+  private Method getHFlush()
+  throws IOException {
+    Method m = null;
+    try {
+      Class<? extends OutputStream> c = getWriterFSDataOutputStream().getClass();
+      m = c.getMethod("hflush", new Class<?> []{});
+    } catch (SecurityException e) {
+      throw new IOException("Failed test for hflush", e);
+    } catch (NoSuchMethodException e) {
+      // Ignore
+    }
+    return m;
+  }
   // Get at the private FSDataOutputStream inside in SequenceFile so we can
-  // call sync on it.  Make it accessible.  Stash it aside for call up in
-  // the sync method.
+  // call sync on it.  Make it accessible.
+  private FSDataOutputStream getSequenceFilePrivateFSDataOutputStreamAccessible()
+  throws IOException {
+    FSDataOutputStream out = null;
     final Field fields [] = this.writer.getClass().getDeclaredFields();
     final String fieldName = "out";
     for (int i = 0; i < fields.length; ++i) {
@@ -98,34 +153,17 @@
       try {
         // Make the 'out' field up in SF.Writer accessible.
         fields[i].setAccessible(true);
-        FSDataOutputStream out =
-          (FSDataOutputStream)fields[i].get(this.writer);
-        this.dfsClient_out = out.getWrappedStream();
+        out = (FSDataOutputStream)fields[i].get(this.writer);
         break;
       } catch (IllegalAccessException ex) {
         throw new IOException("Accessing " + fieldName, ex);
-      }
-    }
-  }
-  // Now do dirty work to see if syncFs is available.
-  // Test if syncfs is available.
-  Method m = null;
-  boolean append = conf.getBoolean("dfs.support.append", false);
-  if (append) {
-    try {
-      // function pointer to writer.syncFs()
-      m = this.writer.getClass().getMethod("syncFs", new Class<?> []{});
       } catch (SecurityException e) {
-        throw new IOException("Failed test for syncfs", e);
-      } catch (NoSuchMethodException e) {
-        // Not available
+        // TODO Auto-generated catch block
+        e.printStackTrace();
       }
     }
-    this.syncFs = m;
-    LOG.info((this.syncFs != null)?
-      "Using syncFs -- HDFS-200":
-      ("syncFs -- HDFS-200 -- not available, dfs.support.append=" + append));
+    }
+    return out;
   }
@@ -146,6 +184,12 @@
       } catch (Exception e) {
         throw new IOException("Reflection", e);
       }
+    } else if (this.hflush != null) {
+      try {
+        this.hflush.invoke(getWriterFSDataOutputStream(), HLog.NO_ARGS);
+      } catch (Exception e) {
+        throw new IOException("Reflection", e);
+      }
     }
   }
@@ -158,7 +202,7 @@
    * @return The dfsclient out stream up inside SF.Writer made accessible, or
    * null if not available.
    */
-  public OutputStream getDFSCOutputStream() {
-    return this.dfsClient_out;
+  public FSDataOutputStream getWriterFSDataOutputStream() {
+    return this.writer_out;
   }
 }
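The writer now looks for two different flush hooks and keeps whichever one exists: syncFs() on the SequenceFile.Writer (the branch-0.20-append / HDFS-200 API) and hflush() on the underlying FSDataOutputStream (the 0.21/0.22 API). A hedged sketch of that dual probe and the resulting sync call (only the two method names and the invoke-via-reflection pattern come from the change; the helper class is illustrative):

    import java.io.IOException;
    import java.lang.reflect.Method;

    public final class WalSyncProbe {
      private WalSyncProbe() {}

      /** @return the named no-arg method on target's class, or null if absent. */
      public static Method probe(Object target, String name) {
        try {
          return target.getClass().getMethod(name, new Class<?>[] {});
        } catch (NoSuchMethodException e) {
          return null;
        }
      }

      /** Prefer syncFs() on the writer, else hflush() on the output stream. */
      public static void sync(Method syncFs, Object writer,
          Method hflush, Object out) throws IOException {
        try {
          if (syncFs != null) {
            syncFs.invoke(writer, new Object[] {});
          } else if (hflush != null) {
            hflush.invoke(out, new Object[] {});
          }
        } catch (Exception e) {
          throw new IOException("Reflection", e);
        }
      }
    }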

File: FSUtils.java

@@ -737,9 +737,12 @@
       } catch (NoSuchMethodException e) {
         append = false;
       }
-    } else {
+    }
+    if (!append) {
+      // Look for the 0.21, 0.22, new-style append evidence.
       try {
         FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
+        append = true;
       } catch (NoSuchMethodException e) {
         append = false;
       }
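With this change, append support is treated as present if either piece of evidence is found: the 0.20-append syncFs() method (only consulted when dfs.support.append is set) or the 0.21/0.22 hflush() method on FSDataOutputStream. A hedged sketch of the combined check; the syncFs() probe target is not visible in this hunk and is assumed here, consistent with the SequenceFileLogWriter changes above, to be SequenceFile.Writer, and the helper name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.io.SequenceFile;

    public final class AppendCheck {
      private AppendCheck() {}

      public static boolean appendAvailable(Configuration conf) {
        boolean append = false;
        if (conf.getBoolean("dfs.support.append", false)) {
          try {
            // branch-0.20-append / CDH3 evidence (assumed probe target).
            SequenceFile.Writer.class.getMethod("syncFs", new Class<?>[] {});
            append = true;
          } catch (NoSuchMethodException e) {
            append = false;
          }
        }
        if (!append) {
          try {
            // 0.21/0.22 new-style append evidence.
            FSDataOutputStream.class.getMethod("hflush", new Class<?>[] {});
            append = true;
          } catch (NoSuchMethodException e) {
            append = false;
          }
        }
        return append;
      }
    }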

File: InfoServer.java

@@ -20,16 +20,19 @@
 package org.apache.hadoop.hbase.util;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.mortbay.jetty.handler.ContextHandlerCollection;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.DefaultServlet;
-import java.io.IOException;
-import java.net.URL;
-import java.util.Map;
 /**
  * Create a Jetty embedded server to answer http requests.  The primary goal
  * is to serve up status information for the server.
@@ -39,6 +42,8 @@ import java.util.Map;
  * "/" -> the jsp server code from (src/hbase-webapps/<name>)
  */
 public class InfoServer extends HttpServer {
+  private final Configuration config;
   /**
    * Create a status server on the given port.
    * The jsp scripts are taken from src/hbase-webapps/<code>name<code>.
@@ -49,15 +54,19 @@ public class InfoServer extends HttpServer {
    * increment by 1 until it finds a free port.
    * @throws IOException e
    */
-  public InfoServer(String name, String bindAddress, int port, boolean findPort)
+  public InfoServer(String name, String bindAddress, int port, boolean findPort,
+      final Configuration c)
   throws IOException {
     super(name, bindAddress, port, findPort, HBaseConfiguration.create());
-    webServer.addHandler(new ContextHandlerCollection());
+    this.config = c;
+    fixupLogsServletLocation();
   }
-  protected void addDefaultApps(ContextHandlerCollection parent, String appDir)
-  throws IOException {
-    super.addDefaultApps(parent, appDir);
+  /**
+   * Fixup where the logs app points, make it point at hbase logs rather than
+   * hadoop logs.
+   */
+  private void fixupLogsServletLocation() {
     // Must be same as up in hadoop.
     final String logsContextPath = "/logs";
     // Now, put my logs in place of hadoops... disable old one first.
@@ -72,21 +81,39 @@
       this.defaultContexts.put(oldLogsContext, Boolean.FALSE);
     }
     // Now do my logs.
-    // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
+    // Set up the context for "/logs/" if "hbase.log.dir" property is defined.
     String logDir = System.getProperty("hbase.log.dir");
     if (logDir != null) {
-      Context logContext = new Context(parent, "/logs");
+      // This is a little presumptious but seems to work.
+      Context logContext =
+        new Context((ContextHandlerCollection)this.webServer.getHandler(),
+          logsContextPath);
       logContext.setResourceBase(logDir);
       logContext.addServlet(DefaultServlet.class, "/");
       defaultContexts.put(logContext, true);
     }
   }
+  /**
+   * Get the pathname to the webapps files.
+   * @param appName eg "secondary" or "datanode"
+   * @return the pathname as a URL
+   * @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH.
+   */
+  protected String getWebAppsPath(String appName) throws FileNotFoundException {
+    // Copied from the super-class.
+    URL url = getClass().getClassLoader().getResource("hbase-webapps/" + appName);
+    if (url == null)
+      throw new FileNotFoundException("webapps/" + appName
+        + " not found in CLASSPATH");
+    String urlString = url.toString();
+    return urlString.substring(0, urlString.lastIndexOf('/'));
+  }
   /**
    * Get the pathname to the <code>path</code> files.
    * @return the pathname as a URL
    */
+  @Override
   protected String getWebAppsPath() throws IOException {
     // Hack: webapps is not a unique enough element to find in CLASSPATH
     // We'll more than likely find the hadoop webapps dir.  So, instead
@@ -95,29 +122,9 @@
     // master webapp resides is where we want this InfoServer picking up
     // web applications.
     final String master = "master";
-    String p = getWebAppDir(master);
-    // Now strip master + the separator off the end of our context
-    return p.substring(0, p.length() - (master.length() + 1/* The separator*/));
-  }
-  private static String getWebAppsPath(final String path)
-  throws IOException {
-    URL url = InfoServer.class.getClassLoader().getResource(path);
-    if (url == null)
-      throw new IOException("hbase-webapps not found in CLASSPATH: " + path);
-    return url.toString();
-  }
-  /**
-   * Get the path for this web app
-   * @param webappName web app
-   * @return path
-   * @throws IOException e
-   */
-  public static String getWebAppDir(final String webappName)
-  throws IOException {
-    String webappDir;
-    webappDir = getWebAppsPath("hbase-webapps/" + webappName);
-    return webappDir;
+    String p = getWebAppsPath(master);
+    int index = p.lastIndexOf(master);
+    // Now strip master off the end if it is present
+    return index == -1? p: p.substring(0, index);
   }
 }
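The new getWebAppsPath(String) override finds HBase's own webapps by resolving the hbase-webapps/<app> resource on the classpath and trimming the trailing path element, which is how the InfoServer avoids picking up Hadoop's webapps directory (per the "Hack" comment above). A small sketch of just that resolution step (resource name and string handling mirror the change; the helper class is illustrative):

    import java.io.FileNotFoundException;
    import java.net.URL;

    public final class WebAppsPath {
      private WebAppsPath() {}

      /** @return the classpath URL of the hbase-webapps directory containing appName. */
      public static String forApp(String appName) throws FileNotFoundException {
        URL url = WebAppsPath.class.getClassLoader()
            .getResource("hbase-webapps/" + appName);
        if (url == null) {
          throw new FileNotFoundException("webapps/" + appName + " not found in CLASSPATH");
        }
        String s = url.toString();
        // Strip the app name, leaving the hbase-webapps directory URL.
        return s.substring(0, s.lastIndexOf('/'));
      }
    }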

File: HBaseTestingUtility.java

@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.zookeeper.ZooKeeper;
@@ -1233,7 +1234,10 @@
     Field field = this.dfsCluster.getClass().getDeclaredField("nameNode");
     field.setAccessible(true);
     NameNode nn = (NameNode)field.get(this.dfsCluster);
-    nn.namesystem.leaseManager.setLeasePeriod(100, 50000);
+    field = nn.getClass().getDeclaredField("namesystem");
+    field.setAccessible(true);
+    FSNamesystem namesystem = (FSNamesystem)field.get(nn);
+    namesystem.leaseManager.setLeasePeriod(100, 50000);
   }
   /**

File: TestHFileOutputFormat.java

@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -165,7 +166,7 @@ public class TestHFileOutputFormat {
    */
   @Test
   public void test_LATEST_TIMESTAMP_isReplaced()
-  throws IOException, InterruptedException {
+  throws Exception {
     Configuration conf = new Configuration(this.util.getConfiguration());
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
@@ -174,8 +175,7 @@
     try {
       Job job = new Job(conf);
       FileOutputFormat.setOutputPath(job, dir);
-      context = new TaskAttemptContext(job.getConfiguration(),
-        new TaskAttemptID());
+      context = getTestTaskAttemptContext(job);
       HFileOutputFormat hof = new HFileOutputFormat();
       writer = hof.getRecordWriter(context);
       final byte [] b = Bytes.toBytes("b");
@@ -203,6 +203,32 @@
     }
   }
+  /**
+   * @return True if the available mapreduce is post-0.20.
+   */
+  private static boolean isPost020MapReduce() {
+    // Here is a coarse test for post 0.20 hadoop; TAC became an interface.
+    return TaskAttemptContext.class.isInterface();
+  }
+  private TaskAttemptContext getTestTaskAttemptContext(final Job job)
+  throws IOException, Exception {
+    TaskAttemptContext context;
+    if (isPost020MapReduce()) {
+      TaskAttemptID id =
+        TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0");
+      Class<?> clazz =
+        Class.forName("org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl");
+      Constructor<?> c = clazz.
+        getConstructor(job.getConfiguration().getClass(), TaskAttemptID.class);
+      context = (TaskAttemptContext)c.newInstance(job.getConfiguration(), id);
+    } else {
+      context = org.apache.hadoop.hbase.mapreduce.hadoopbackport.InputSampler.
+        getTaskAttemptContext(job);
+    }
+    return context;
+  }
   /**
    * Run small MR job.
    */
@@ -454,8 +480,7 @@
    * from the column family descriptor
    */
   @Test
-  public void testColumnFamilyCompression()
-  throws IOException, InterruptedException {
+  public void testColumnFamilyCompression() throws Exception {
     Configuration conf = new Configuration(this.util.getConfiguration());
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
@@ -484,8 +509,7 @@
       setupRandomGeneratorMapper(job);
       HFileOutputFormat.configureIncrementalLoad(job, table);
      FileOutputFormat.setOutputPath(job, dir);
-      context = new TaskAttemptContext(job.getConfiguration(),
-        new TaskAttemptID());
+      context = getTestTaskAttemptContext(job);
       HFileOutputFormat hof = new HFileOutputFormat();
       writer = hof.getRecordWriter(context);
File: TestHLogSplit.java

@@ -173,9 +173,8 @@
   throws IOException {
     AtomicBoolean stop = new AtomicBoolean(false);
-    FileStatus[] stats = fs.listStatus(new Path("/hbase/t1"));
-    assertTrue("Previous test should clean up table dir",
-      stats == null || stats.length == 0);
+    assertFalse("Previous test should clean up table dir",
+      fs.exists(new Path("/hbase/t1")));
     generateHLogs(-1);
@@ -967,8 +966,7 @@
     HLogSplitter.moveRecoveredEditsFromTemp("tmpdir", hbaseDir, oldLogDir,
       logfile.getPath().toString(), conf);
     Path tdir = HTableDescriptor.getTableDir(hbaseDir, TABLE_NAME);
-    FileStatus [] files = this.fs.listStatus(tdir);
-    assertTrue(files == null || files.length == 0);
+    assertFalse(fs.exists(tdir));
     assertEquals(0, countHLog(fs.listStatus(oldLogDir)[0].getPath(), fs, conf));
   }

File: TestReplication.java

@@ -59,6 +59,7 @@ public class TestReplication {
   private static Configuration conf1;
   private static Configuration conf2;
+  private static Configuration CONF_WITH_LOCALFS;
   private static ZooKeeperWatcher zkw1;
   private static ZooKeeperWatcher zkw2;
@@ -123,7 +124,7 @@
     setIsReplication(true);
     LOG.info("Setup second Zk");
+    CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
     utility1.startMiniCluster(2);
     utility2.startMiniCluster(2);
@@ -137,7 +138,6 @@
     HBaseAdmin admin2 = new HBaseAdmin(conf2);
     admin1.createTable(table);
     admin2.createTable(table);
     htable1 = new HTable(conf1, tableName);
     htable1.setWriteBufferSize(1024);
     htable2 = new HTable(conf2, tableName);
@@ -476,7 +476,7 @@
     testSmallBatch();
     String[] args = new String[] {"2", Bytes.toString(tableName)};
-    Job job = VerifyReplication.createSubmittableJob(conf1, args);
+    Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
     if (job == null) {
       fail("Job wasn't created, see the log");
     }
@@ -500,7 +500,7 @@
     }
     Delete delete = new Delete(put.getRow());
     htable2.delete(delete);
-    job = VerifyReplication.createSubmittableJob(conf1, args);
+    job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args);
     if (job == null) {
       fail("Job wasn't created, see the log");
     }

File: TestFSUtils.java

@@ -34,7 +34,6 @@ public class TestFSUtils {
     HBaseTestingUtility htu = new HBaseTestingUtility();
     htu.getConfiguration().setBoolean("dfs.support.append", false);
     assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
-    assertFalse(FSUtils.isAppendSupported(htu.getConfiguration()));
     htu.getConfiguration().setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = null;
     try {