HBASE-3194 HBase should run on both secure and vanilla versions of Hadoop 0.20

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1032848 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack
Date:   2010-11-09 05:42:48 +00:00
Parent: 4dce2b26bf
Commit: 43f81f1943
7 changed files with 243 additions and 157 deletions
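
The changes below converge on one calling pattern: obtain an org.apache.hadoop.hbase.security.User (a facade that hides the UserGroupInformation API differences between secure and vanilla Hadoop 0.20) and do any privileged work inside its runAs() method. A minimal sketch of that pattern, using only the User methods visible in this diff; the wrapper class and the printed output are illustrative, not part of the patch:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;

public class UserFacadeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Fabricate an identity; works whether or not Hadoop security is present.
    User user = User.createUserForTesting(conf, "demo",
        new String[]{"supergroup"});
    // Privileged work happens inside runAs(), so the right user is in
    // effect no matter which Hadoop 0.20 flavor is on the classpath.
    String name = user.runAs(new PrivilegedExceptionAction<String>() {
      public String run() throws Exception {
        return User.getCurrent().getName();
      }
    });
    System.out.println("ran as " + name);
  }
}

Centralizing the doAs-style dispatch behind User is what lets the same HBase source build and run against either Hadoop flavor.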

CHANGES.txt

@@ -1122,6 +1122,8 @@ Release 0.90.0 - Unreleased
    HBASE-3083  Major compaction check should use new timestamp meta
                information in HFiles (rather than dfs timestamp) along with
                TTL to allow major even if single file
+   HBASE-3194  HBase should run on both secure and vanilla versions of Hadoop 0.20
+               (Gary Helmling via Stack)
 
 NEW FEATURES

org/apache/hadoop/hbase/LocalHBaseCluster.java

@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -29,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -148,6 +150,7 @@ public class LocalHBaseCluster {
     this.regionServerClass =
       (Class<? extends HRegionServer>)conf.getClass(HConstants.REGION_SERVER_IMPL,
        regionServerClass);
+
     for (int i = 0; i < noRegionServers; i++) {
       addRegionServer(i);
     }
@@ -169,6 +172,17 @@ public class LocalHBaseCluster {
     return rst;
   }
 
+  public JVMClusterUtil.RegionServerThread addRegionServer(
+      final int index, User user)
+  throws IOException, InterruptedException {
+    return user.runAs(
+        new PrivilegedExceptionAction<JVMClusterUtil.RegionServerThread>() {
+          public JVMClusterUtil.RegionServerThread run() throws Exception {
+            return addRegionServer(index);
+          }
+        });
+  }
+
   public JVMClusterUtil.MasterThread addMaster() throws IOException {
     return addMaster(this.masterThreads.size());
   }
@@ -185,6 +199,17 @@ public class LocalHBaseCluster {
     return mt;
   }
 
+  public JVMClusterUtil.MasterThread addMaster(
+      final int index, User user)
+  throws IOException, InterruptedException {
+    return user.runAs(
+        new PrivilegedExceptionAction<JVMClusterUtil.MasterThread>() {
+          public JVMClusterUtil.MasterThread run() throws Exception {
+            return addMaster(index);
+          }
+        });
+  }
+
   /**
    * @param serverNumber
    * @return region server
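
The two new overloads above let a test harness construct each daemon under its own identity. A hedged usage sketch; the three-argument LocalHBaseCluster constructor and the names below are assumptions for illustration, not part of this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.security.User;

public class AddServerAsUserSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Start with one master and no region servers...
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 0);
    // ...then add a region server constructed under its own fake identity;
    // addRegionServer(index, user) wraps the construction in user.runAs().
    User rsUser = User.createUserForTesting(conf, "rs0",
        new String[]{"supergroup"});
    cluster.addRegionServer(0, rsUser);
    cluster.startup();
  }
}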

org/apache/hadoop/hbase/ipc/HBaseServer.java

@@ -1020,8 +1020,6 @@ public abstract class HBaseServer {
           Writable value = null;
 
           CurCall.set(call);
-          UserGroupInformation previous = UserGroupInformation.getCurrentUGI();
-          UserGroupInformation.setCurrentUser(call.connection.ticket);
           try {
             if (!started)
               throw new ServerNotRunningException("Server is not running yet");
@@ -1031,7 +1029,6 @@ public abstract class HBaseServer {
             errorClass = e.getClass().getName();
             error = StringUtils.stringifyException(e);
           }
-          UserGroupInformation.setCurrentUser(previous);
           CurCall.set(null);
 
           if (buf.size() > buffersize) {

org/apache/hadoop/hbase/HBaseTestingUtility.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ReadWriteConsistencyControl;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
@@ -67,10 +68,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.zookeeper.ZooKeeper;
-import com.google.common.base.Preconditions;
 
 /**
  * Facility for testing HBase.  Replacement for
@@ -357,11 +355,12 @@ public class HBaseTestingUtility {
    * @param numSlaves
    * @return Reference to the hbase mini hbase cluster.
    * @throws IOException
+   * @throws InterruptedException
    * @see {@link #startMiniCluster()}
    */
   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
       final int numSlaves)
-  throws IOException {
+  throws IOException, InterruptedException {
     // Now do the mini hbase cluster.  Set the hbase.rootdir in config.
     createRootDir();
     Configuration c = new Configuration(this.conf);
@@ -382,7 +381,7 @@ public class HBaseTestingUtility {
    * @param servers number of region servers
    * @throws IOException
    */
-  public void restartHBaseCluster(int servers) throws IOException {
+  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
     this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
     // Don't leave here till we've done a successful scan of the .META.
     HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
@@ -576,6 +575,16 @@ public class HBaseTestingUtility {
     return new HTable(new Configuration(getConfiguration()), tableName);
   }
 
+  /**
+   * Drop an existing table
+   * @param tableName existing table
+   */
+  public void deleteTable(byte[] tableName) throws IOException {
+    HBaseAdmin admin = new HBaseAdmin(getConfiguration());
+    admin.disableTable(tableName);
+    admin.deleteTable(tableName);
+  }
+
   /**
    * Provide an existing table name to truncate
    * @param tableName existing table
@@ -1127,20 +1136,20 @@ public class HBaseTestingUtility {
    * @return A new configuration instance with a different user set into it.
    * @throws IOException
    */
-  public static Configuration setDifferentUser(final Configuration c,
+  public static User getDifferentUser(final Configuration c,
     final String differentiatingSuffix)
   throws IOException {
     FileSystem currentfs = FileSystem.get(c);
-    Preconditions.checkArgument(currentfs instanceof DistributedFileSystem);
+    if (!(currentfs instanceof DistributedFileSystem)) {
+      return User.getCurrent();
+    }
     // Else distributed filesystem.  Make a new instance per daemon.  Below
     // code is taken from the AppendTestUtil over in hdfs.
-    Configuration c2 = new Configuration(c);
-    String username = UserGroupInformation.getCurrentUGI().getUserName() +
+    String username = User.getCurrent().getName() +
       differentiatingSuffix;
-    UnixUserGroupInformation.saveToConf(c2,
-      UnixUserGroupInformation.UGI_PROPERTY_NAME,
-      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-    return c2;
+    User user = User.createUserForTesting(c, username,
+      new String[]{"supergroup"});
+    return user;
   }
 
   /**
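
Callers now get a User back from getDifferentUser() and wrap any follow-on filesystem work in runAs(), as the MiniHBaseCluster and TestWALReplay changes below do. A sketch of the intended call pattern; the suffix string and the wrapper class are illustrative:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.security.User;

public class DifferentUserSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    // On HDFS this is the current user plus the suffix; on a local
    // filesystem it is simply the current user.
    User u = HBaseTestingUtility.getDifferentUser(conf, ".daemon.1");
    u.runAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws Exception {
        // A distinct user yields a distinct entry in the FileSystem cache.
        return FileSystem.get(conf);
      }
    });
  }
}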

org/apache/hadoop/hbase/MiniHBaseCluster.java

@@ -20,6 +20,8 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -33,13 +35,12 @@ import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.MapWritable;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -52,12 +53,7 @@ public class MiniHBaseCluster {
   static final Log LOG = LogFactory.getLog(MiniHBaseCluster.class.getName());
   private Configuration conf;
   public LocalHBaseCluster hbaseCluster;
-  // Cache this.  For some reason only works first time I get it.  TODO: Figure
-  // out why.
-  private final static UserGroupInformation UGI;
-  static {
-    UGI = UserGroupInformation.getCurrentUGI();
-  }
+  private static int index;
 
   /**
    * Start a MiniHBaseCluster.
@@ -66,7 +62,7 @@ public class MiniHBaseCluster {
    * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int numRegionServers)
-  throws IOException {
+  throws IOException, InterruptedException {
     this(conf, 1, numRegionServers);
   }
 
@@ -79,7 +75,7 @@ public class MiniHBaseCluster {
    */
   public MiniHBaseCluster(Configuration conf, int numMasters,
       int numRegionServers)
-  throws IOException {
+  throws IOException, InterruptedException {
     this.conf = conf;
     conf.set(HConstants.MASTER_PORT, "0");
     init(numMasters, numRegionServers);
@@ -165,12 +161,13 @@ public class MiniHBaseCluster {
    * the FileSystem system exit hook does.
    */
   public static class MiniHBaseClusterRegionServer extends HRegionServer {
-    private static int index = 0;
     private Thread shutdownThread = null;
+    private User user = null;
 
     public MiniHBaseClusterRegionServer(Configuration conf)
         throws IOException, InterruptedException {
-      super(setDifferentUser(conf));
+      super(conf);
+      this.user = User.getCurrent();
     }
 
     public void setHServerInfo(final HServerInfo hsi) {
@@ -184,19 +181,6 @@ public class MiniHBaseCluster {
      * @return A new fs instance if we are up on DistributeFileSystem.
      * @throws IOException
      */
-    private static Configuration setDifferentUser(final Configuration c)
-    throws IOException {
-      FileSystem currentfs = FileSystem.get(c);
-      if (!(currentfs instanceof DistributedFileSystem)) return c;
-      // Else distributed filesystem.  Make a new instance per daemon.  Below
-      // code is taken from the AppendTestUtil over in hdfs.
-      Configuration c2 = new Configuration(c);
-      String username = UGI.getUserName() + ".hrs." + index++;
-      UnixUserGroupInformation.saveToConf(c2,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-      return c2;
-    }
 
     @Override
     protected void handleReportForDutyResponse(MapWritable c) throws IOException {
@@ -208,7 +192,12 @@ public class MiniHBaseCluster {
     @Override
     public void run() {
       try {
-        super.run();
+        this.user.runAs(new PrivilegedAction<Object>(){
+          public Object run() {
+            runRegionServer();
+            return null;
+          }
+        });
       } catch (Throwable t) {
         LOG.error("Exception in run", t);
       } finally {
@@ -220,10 +209,27 @@ public class MiniHBaseCluster {
       }
     }
 
+    private void runRegionServer() {
+      super.run();
+    }
+
     @Override
     public void kill() {
       super.kill();
     }
+
+    public void abort(final String reason, final Throwable cause) {
+      this.user.runAs(new PrivilegedAction<Object>() {
+        public Object run() {
+          abortRegionServer(reason, cause);
+          return null;
+        }
+      });
+    }
+
+    private void abortRegionServer(String reason, Throwable cause) {
+      super.abort(reason, cause);
+    }
   }
 
   /**
@@ -250,17 +256,26 @@ public class MiniHBaseCluster {
   }
 
   private void init(final int nMasterNodes, final int nRegionNodes)
-  throws IOException {
+  throws IOException, InterruptedException {
     try {
       // start up a LocalHBaseCluster
-      hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, nRegionNodes,
+      hbaseCluster = new LocalHBaseCluster(conf, nMasterNodes, 0,
         MiniHBaseCluster.MiniHBaseClusterMaster.class,
         MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
+      // manually add the regionservers as other users
+      for (int i=0; i<nRegionNodes; i++) {
+        User user = HBaseTestingUtility.getDifferentUser(conf,
+          ".hfs."+index++);
+        hbaseCluster.addRegionServer(i, user);
+      }
       hbaseCluster.startup();
     } catch (IOException e) {
       shutdown();
       throw e;
     } catch (Throwable t) {
+      LOG.error("Error starting cluster", t);
       shutdown();
       throw new IOException("Shutting down", t);
     }
@@ -272,10 +287,23 @@ public class MiniHBaseCluster {
    * @throws IOException
    * @return New RegionServerThread
    */
-  public JVMClusterUtil.RegionServerThread startRegionServer() throws IOException {
-    JVMClusterUtil.RegionServerThread t = this.hbaseCluster.addRegionServer();
+  public JVMClusterUtil.RegionServerThread startRegionServer()
+      throws IOException {
+    User rsUser =
+      HBaseTestingUtility.getDifferentUser(conf, ".hfs."+index++);
+    JVMClusterUtil.RegionServerThread t = null;
+    try {
+      t = rsUser.runAs(
+        new PrivilegedExceptionAction<JVMClusterUtil.RegionServerThread>() {
+          public JVMClusterUtil.RegionServerThread run() throws Exception {
+            return hbaseCluster.addRegionServer();
+          }
+        });
       t.start();
       t.waitForServerOnline();
+    } catch (InterruptedException ie) {
+      throw new IOException("Interrupted executing UserGroupInformation.doAs()", ie);
+    }
     return t;
   }

org/apache/hadoop/hbase/regionserver/TestStore.java

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.lang.ref.SoftReference;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
@@ -50,12 +51,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 
 import com.google.common.base.Joiner;
@@ -460,13 +461,14 @@ public class TestStore extends TestCase {
   public void testHandleErrorsInFlush() throws Exception {
     LOG.info("Setting up a faulty file system that cannot write");
 
-    Configuration conf = HBaseConfiguration.create();
-    // Set a different UGI so we don't get the same cached LocalFS instance
-    conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME,
-      "testhandleerrorsinflush,foo");
+    final Configuration conf = HBaseConfiguration.create();
+    User user = User.createUserForTesting(conf,
+      "testhandleerrorsinflush", new String[]{"foo"});
     // Inject our faulty LocalFileSystem
     conf.setClass("fs.file.impl", FaultyFileSystem.class,
       FileSystem.class);
+    user.runAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws Exception {
     // Make sure it worked (above is sensitive to caching details in hadoop core)
     FileSystem fs = FileSystem.get(conf);
     assertEquals(FaultyFileSystem.class, fs.getClass());
@@ -475,9 +477,9 @@ public class TestStore extends TestCase {
     init(getName(), conf);
 
     LOG.info("Adding some data");
-    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
-    this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
-    this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
+    store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
+    store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
+    store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
 
     LOG.info("Before flush, we should have no files");
     FileStatus[] files = fs.listStatus(store.getHomedir());
@@ -499,6 +501,9 @@ public class TestStore extends TestCase {
     paths = FileUtil.stat2Paths(files);
     System.err.println("Got paths: " + Joiner.on(",").join(paths));
     assertEquals(0, paths.length);
+    return null;
+      }
+    });
   }
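
The rewrite above works because Hadoop's FileSystem.get() cache keys instances by scheme, authority, and the calling user: the old code forced a cache miss by planting a synthetic UGI property in the Configuration, while the new code gets the same effect by running the assertions as a freshly minted test user. The same pattern condensed to a skeleton; FaultyFileSystem is the test's injected class above, and the user and group names here are illustrative:

public void testWithInjectedFileSystem() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  User user = User.createUserForTesting(conf, "cachebuster",
      new String[]{"testgroup"});
  // Route file:// through the injected implementation for this conf.
  conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
  user.runAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      // A different user cannot hit the LocalFileSystem instance cached
      // for the real user, so the injected class is actually constructed.
      FileSystem fs = FileSystem.get(conf);
      assertEquals(FaultyFileSystem.class, fs.getClass());
      return null;
    }
  });
}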

org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java

@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -179,10 +181,10 @@ public class TestWALReplay {
   @Test
   public void testRegionMadeOfBulkLoadedFilesOnly()
   throws IOException, SecurityException, IllegalArgumentException,
-      NoSuchFieldException, IllegalAccessException {
+      NoSuchFieldException, IllegalAccessException, InterruptedException {
     final String tableNameStr = "testReplayEditsWrittenViaHRegion";
-    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
-    Path basedir = new Path(this.hbaseRootDir, tableNameStr);
+    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
+    final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
     deleteDir(basedir);
     HLog wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, wal, this.conf);
@@ -198,8 +200,11 @@ public class TestWALReplay {
     wal.sync();
     // Now 'crash' the region by stealing its wal
-    Configuration newConf = HBaseTestingUtility.setDifferentUser(this.conf,
+    final Configuration newConf = HBaseConfiguration.create(this.conf);
+    User user = HBaseTestingUtility.getDifferentUser(newConf,
       tableNameStr);
+    user.runAs(new PrivilegedExceptionAction() {
+      public Object run() throws Exception {
     runWALSplit(newConf);
     HLog wal2 = createWAL(newConf);
     HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf),
@@ -210,6 +215,9 @@ public class TestWALReplay {
     // I can't close wal1.  Its been appropriated when we split.
     region2.close();
     wal2.closeAndDelete();
+    return null;
+      }
+    });
   }
 
   /**
@@ -224,10 +232,10 @@ public class TestWALReplay {
   @Test
   public void testReplayEditsWrittenViaHRegion()
   throws IOException, SecurityException, IllegalArgumentException,
-      NoSuchFieldException, IllegalAccessException {
+      NoSuchFieldException, IllegalAccessException, InterruptedException {
     final String tableNameStr = "testReplayEditsWrittenViaHRegion";
-    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
-    Path basedir = new Path(this.hbaseRootDir, tableNameStr);
+    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
+    final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
     deleteDir(basedir);
     final byte[] rowName = Bytes.toBytes(tableNameStr);
     final int countPerFamily = 10;
@@ -250,7 +258,7 @@ public class TestWALReplay {
       }
     }
     // Now assert edits made it in.
-    Get g = new Get(rowName);
+    final Get g = new Get(rowName);
     Result result = region.get(g, null);
     assertEquals(countPerFamily * hri.getTableDesc().getFamilies().size(),
       result.size());
@@ -280,14 +288,17 @@ public class TestWALReplay {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
     }
     // Get count of edits.
-    Result result2 = region2.get(g, null);
+    final Result result2 = region2.get(g, null);
     assertEquals(2 * result.size(), result2.size());
     wal2.sync();
     // Set down maximum recovery so we dfsclient doesn't linger retrying something
     // long gone.
     HBaseTestingUtility.setMaxRecoveryErrorCount(wal2.getOutputStream(), 1);
-    Configuration newConf = HBaseTestingUtility.setDifferentUser(this.conf,
+    final Configuration newConf = HBaseConfiguration.create(this.conf);
+    User user = HBaseTestingUtility.getDifferentUser(newConf,
       tableNameStr);
+    user.runAs(new PrivilegedExceptionAction() {
+      public Object run() throws Exception {
     runWALSplit(newConf);
     FileSystem newFS = FileSystem.get(newConf);
     // Make a new wal for new region open.
@@ -313,6 +324,9 @@ public class TestWALReplay {
     // I can't close wal1.  Its been appropriated when we split.
     region3.close();
     wal3.closeAndDelete();
+    return null;
+      }
+    });
   }
 
   /**
@@ -323,11 +337,11 @@ public class TestWALReplay {
   @Test
   public void testReplayEditsWrittenIntoWAL() throws Exception {
     final String tableNameStr = "testReplayEditsWrittenIntoWAL";
-    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
-    Path basedir = new Path(hbaseRootDir, tableNameStr);
+    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
+    final Path basedir = new Path(hbaseRootDir, tableNameStr);
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
-    HLog wal = createWAL(this.conf);
+    final HLog wal = createWAL(this.conf);
     final byte[] tableName = Bytes.toBytes(tableNameStr);
     final byte[] rowName = tableName;
     final byte[] regionName = hri.getEncodedNameAsBytes();
@@ -363,8 +377,11 @@ public class TestWALReplay {
     HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
     // Make a new conf and a new fs for the splitter to run on so we can take
     // over old wal.
-    Configuration newConf = HBaseTestingUtility.setDifferentUser(this.conf,
+    final Configuration newConf = HBaseConfiguration.create(this.conf);
+    User user = HBaseTestingUtility.getDifferentUser(newConf,
       ".replay.wal.secondtime");
+    user.runAs(new PrivilegedExceptionAction(){
+      public Object run() throws Exception {
     runWALSplit(newConf);
     FileSystem newFS = FileSystem.get(newConf);
     // 100k seems to make for about 4 flushes during HRegion#initialize.
@@ -396,6 +413,9 @@ public class TestWALReplay {
     } finally {
       newWal.closeAndDelete();
     }
+    return null;
+      }
+    });
   }
 
   // Flusher used in this test.  Keep count of how often we are called and