HBASE-2587 Corral where tests write data when running and make sure clean target removes all written; reverting part of last commit... a mistake
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@946884 13f79535-47bb-0310-9956-ffa450edef68
parent cb03dcca73
commit b2d7b19bc3
@@ -48,8 +48,6 @@ import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.zookeeper.ZooKeeper;
 
 /**
@@ -72,13 +70,6 @@ public class HBaseTestingUtility {
   private File clusterTestBuildDir = null;
   private HBaseAdmin hbaseAdmin = null;
-
-  // Cache this. For some reason only works first time I get it. TODO: Figure
-  // out why.
-  private final static UserGroupInformation UGI;
-  static {
-    UGI = UserGroupInformation.getCurrentUGI();
-  }
 
   /**
    * System property key to get test directory value.
    */
@@ -736,23 +727,4 @@ public class HBaseTestingUtility {
   public MiniDFSCluster getDFSCluster() {
     return dfsCluster;
   }
-
-  /**
-   * Create a copy of the passed configuration laden with a new user. Use it
-   * to do things like get a new FileSystem instance.
-   * @param c
-   * @param index Some unique number used to make a unique user.
-   * @return Copy of <code>c</code> with new user stuff loaded into it.
-   * @throws IOException
-   */
-  public static Configuration setDifferentUser(final Configuration c,
-    final int index)
-  throws IOException {
-    Configuration c2 = new Configuration(c);
-    String username = UGI.getUserName() + ".hrs." + index;
-    UnixUserGroupInformation.saveToConf(c2,
-      UnixUserGroupInformation.UGI_PROPERTY_NAME,
-      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
-    return c2;
-  }
 }
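For context on the helper being reverted out of HBaseTestingUtility here (and re-added to MiniHBaseCluster below): it works by stamping a copy of the Configuration with a different Unix user name. The FileSystem cache of this Hadoop era keys, in part, on the user read from the configuration, so each distinct user name yields a separate FileSystem instance instead of the shared cached one, and closing one instance does not pull the filesystem out from under the other daemons. A minimal standalone sketch of the same trick, using only the UnixUserGroupInformation calls that appear in this diff (class and user names are illustrative, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UnixUserGroupInformation;

public class DifferentUserSketch {
  // Copy a configuration and bind it to a synthetic user. FileSystem.get()
  // keys its cache partly on the user carried in the conf, so each distinct
  // user name gets a fresh FileSystem instance.
  static Configuration withUser(final Configuration c, final String username)
      throws IOException {
    Configuration copy = new Configuration(c);
    UnixUserGroupInformation.saveToConf(copy,
      UnixUserGroupInformation.UGI_PROPERTY_NAME,
      new UnixUserGroupInformation(username, new String[]{"supergroup"}));
    return copy;
  }

  public static void main(String[] args) throws IOException {
    Configuration base = new Configuration();
    FileSystem fs1 = FileSystem.get(withUser(base, "testuser.hrs.1"));
    FileSystem fs2 = FileSystem.get(withUser(base, "testuser.hrs.2"));
    // Distinct users, distinct instances; closing fs1 leaves fs2 usable.
    System.out.println(fs1 == fs2);
  }
}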
@@ -35,7 +35,10 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.MapWritable;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * This class creates a single process HBase cluster.
@@ -47,6 +50,12 @@ public class MiniHBaseCluster implements HConstants {
   static final Log LOG = LogFactory.getLog(MiniHBaseCluster.class.getName());
   private Configuration conf;
   public LocalHBaseCluster hbaseCluster;
+  // Cache this. For some reason only works first time I get it. TODO: Figure
+  // out why.
+  private final static UserGroupInformation UGI;
+  static {
+    UGI = UserGroupInformation.getCurrentUGI();
+  }
 
   /**
    * Start a MiniHBaseCluster.
@@ -122,9 +131,30 @@ public class MiniHBaseCluster implements HConstants {
 
     public MiniHBaseClusterRegionServer(Configuration conf)
         throws IOException {
-      super(HBaseTestingUtility.setDifferentUser(conf, index++));
+      super(setDifferentUser(conf));
    }
 
+    /*
+     * @param c
+     * @param currentfs We return this if we did not make a new one.
+     * @param uniqueName Same name used to help identify the created fs.
+     * @return A new fs instance if we are up on DistributeFileSystem.
+     * @throws IOException
+     */
+    private static Configuration setDifferentUser(final Configuration c)
+        throws IOException {
+      FileSystem currentfs = FileSystem.get(c);
+      if (!(currentfs instanceof DistributedFileSystem)) return c;
+      // Else distributed filesystem. Make a new instance per daemon. Below
+      // code is taken from the AppendTestUtil over in hdfs.
+      Configuration c2 = new Configuration(c);
+      String username = UGI.getUserName() + ".hrs." + index++;
+      UnixUserGroupInformation.saveToConf(c2,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+      return c2;
+    }
+
     @Override
     protected void init(MapWritable c) throws IOException {
       super.init(c);
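One design note on the constructor change above: setDifferentUser has to be a static helper invoked inside the super(...) call, because Java does not allow touching instance state (or this) before the superclass constructor has run. A small sketch of that pattern, with illustrative class names that are not part of the patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

// Sketch only: shows why a conf-rewriting helper called from super(...) must
// be static. BaseServer stands in for the real server superclass.
class BaseServer {
  BaseServer(Configuration conf) throws IOException {
    // the real server reads its conf here
  }
}

class PatchedServer extends BaseServer {
  PatchedServer(Configuration conf) throws IOException {
    // 'this' does not exist yet at this point, so only a static helper can
    // doctor the Configuration before handing it to the superclass.
    super(rewrite(conf));
  }

  private static Configuration rewrite(final Configuration c) {
    Configuration copy = new Configuration(c);
    copy.set("example.per.instance.key", "value");  // illustrative tweak only
    return copy;
  }
}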