HBASE-8262 Add testcase to verify HBASE-7678's empty region split semantics change

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1464596 13f79535-47bb-0310-9956-ffa450edef68
Jonathan Hsieh 2013-04-04 15:31:26 +00:00
parent 8465f20316
commit f357ac700d
2 changed files with 122 additions and 24 deletions

FSUtils.java

@@ -49,18 +49,19 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.FileSystemVersionException;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.AccessControlException;
@@ -1311,6 +1312,57 @@ public abstract class FSUtils {
return getRootDir(conf).getFileSystem(conf);
}
/**
* Runs through the HBase rootdir/tablename and creates a reverse lookup map for
* table StoreFile names to the full Path.
* <br>
* Example...<br>
* Key = 3944417774205889744 <br>
* Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
*
* @param map map to add values to. If null, this method will create and populate one to return
* @param fs The file system to use.
* @param hbaseRootDir The root directory to scan.
* @param tablename name of the table to scan.
* @return Map keyed by StoreFile name with a value of the full Path.
* @throws IOException When scanning the directory fails.
*/
public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
final FileSystem fs, final Path hbaseRootDir, byte[] tablename)
throws IOException {
if (map == null) {
map = new HashMap<String, Path>();
}
// build the path to the requested table's directory
Path tableDir = new Path(hbaseRootDir, Bytes.toString(tablename));
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
// should be regions.
PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
FileStatus[] regionDirs = fs.listStatus(tableDir);
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
continue;
}
// else it's a region name; now look in the region for families
FileStatus[] familyDirs = fs.listStatus(dd, df);
for (FileStatus familyDir : familyDirs) {
Path family = familyDir.getPath();
// now in the family dir, iterate over the StoreFiles and put them in the map
FileStatus[] familyStatus = fs.listStatus(family);
for (FileStatus sfStatus : familyStatus) {
Path sf = sfStatus.getPath();
map.put(sf.getName(), sf);
}
}
}
return map;
}
/**
* Runs through the HBase rootdir and creates a reverse lookup map for
* table StoreFile names to the full Path.
@@ -1336,28 +1388,8 @@ public abstract class FSUtils {
PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
for (FileStatus tableDir : tableDirs) {
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
// should be regions.
FileStatus[] regionDirs = fs.listStatus(tableDir.getPath(), df);
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
continue;
}
// else its a region name, now look in region for families
FileStatus[] familyDirs = fs.listStatus(dd, df);
for (FileStatus familyDir : familyDirs) {
Path family = familyDir.getPath();
// now in family, iterate over the StoreFiles and
// put in map
FileStatus[] familyStatus = fs.listStatus(family);
for (FileStatus sfStatus : familyStatus) {
Path sf = sfStatus.getPath();
map.put( sf.getName(), sf);
}
}
}
byte[] tablename = Bytes.toBytes(tableDir.getPath().getName());
getTableStoreFilePathMap(map, fs, hbaseRootDir, tablename);
}
return map;
}
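For context, a minimal sketch (not part of this commit) of how the new per-table overload above might be called from standalone code; the table name "myTable", the class name StoreFileLister, and the configuration setup are assumptions for illustration only:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;

public class StoreFileLister {
  public static void main(String[] args) throws IOException {
    // Assumes an hbase-site.xml on the classpath that points at the cluster.
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // Build the reverse lookup (StoreFile name -> full Path) for a single table.
    // Passing null asks the method to allocate and return a new map.
    Map<String, Path> storeFiles =
        FSUtils.getTableStoreFilePathMap(null, fs, rootDir, Bytes.toBytes("myTable"));
    for (Map.Entry<String, Path> e : storeFiles.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }
}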

TestSplitTransactionOnCluster.java

@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@@ -34,6 +35,8 @@ import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -59,6 +62,8 @@ import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -67,8 +72,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import org.junit.AfterClass;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -640,6 +645,67 @@ public class TestSplitTransactionOnCluster {
admin.flush(tableName);
}
/**
* If a region in a table has no store files, it should still split successfully into two
* daughter regions, each with no store files.
*/
@Test
public void testSplitRegionWithNoStoreFiles()
throws Exception {
final byte[] tableName = Bytes.toBytes("testSplitRegionWithNoStoreFiles");
// Create table then get the single region for our new table.
createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
HRegionInfo hri = getAndCheckSingleTableRegion(regions);
ensureTableRegionNotOnSameServerAsMeta(admin, hri);
int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
// Turn off balancer so it doesn't cut in and mess up our placements.
this.admin.setBalancerRunning(false, true);
// Turn off the meta scanner so it doesn't remove the parent on us.
cluster.getMaster().setCatalogJanitorEnabled(false);
try {
// Precondition: we created a table with no data, no store files.
printOutRegions(regionServer, "Initial regions: ");
Configuration conf = cluster.getConfiguration();
HBaseFsck.debugLsr(conf, new Path("/"));
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = TESTING_UTIL.getDFSCluster().getFileSystem();
Map<String, Path> storefiles =
FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName);
assertEquals("Expected nothing but found " + storefiles.toString(), storefiles.size(), 0);
// find a splittable region. Refresh the regions list
regions = cluster.getRegions(tableName);
final HRegion region = findSplittableRegion(regions);
assertTrue("not able to find a splittable region", region != null);
// Now split.
SplitTransaction st = new MockedSplitTransaction(region, Bytes.toBytes("row2"));
try {
st.prepare();
st.execute(regionServer, regionServer);
} catch (IOException e) {
fail("Split execution should have succeeded with no exceptions thrown");
}
// Postcondition: the table with no store files has been split into two regions, which still
// have no store files
List<HRegion> daughters = cluster.getRegions(tableName);
assertEquals(2, daughters.size());
// check dirs
HBaseFsck.debugLsr(conf, new Path("/"));
Map<String, Path> storefilesAfter =
FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName);
assertEquals("Expected nothing but found " + storefilesAfter.toString(),
storefilesAfter.size(), 0);
} finally {
admin.setBalancerRunning(true, false);
cluster.getMaster().setCatalogJanitorEnabled(true);
}
}
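Aside, not part of this patch: the test above drives the split through SplitTransaction directly. From client code, the same empty-region split could presumably be requested through the admin API; a rough sketch under that assumption, with the table name, class name, and split point invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class EmptyRegionSplitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Request a split at an explicit split point even though the region holds no
      // store files; per HBASE-7678 it should still split into two empty daughters.
      admin.split(Bytes.toBytes("emptyTable"), Bytes.toBytes("row2"));
    } finally {
      admin.close();
    }
  }
}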
private void testSplitBeforeSettingSplittingInZKInternals() throws Exception {
final byte[] tableName = Bytes.toBytes("testSplitBeforeSettingSplittingInZK");
try {