diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 4243f5b481a..08ecd632c62 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -88,8 +88,8 @@ public class TestBackupBase {
   protected static TableName table3 = TableName.valueOf("table3");
   protected static TableName table4 = TableName.valueOf("table4");
 
-  protected static TableName table1_restore = TableName.valueOf("ns1:table1_restore");
-  protected static TableName table2_restore = TableName.valueOf("ns2:table2_restore");
+  protected static TableName table1_restore = TableName.valueOf("default:table1");
+  protected static TableName table2_restore = TableName.valueOf("ns2:table2");
   protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
   protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
 
@@ -404,7 +404,7 @@ public class TestBackupBase {
 
   protected static void createTables() throws Exception {
     long tid = System.currentTimeMillis();
-    table1 = TableName.valueOf("ns1:test-" + tid);
+    table1 = TableName.valueOf("test-" + tid);
     HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
 
     // Create namespaces
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 0bce7693739..b74f42fb839 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -163,14 +163,14 @@ public class TestIncrementalBackup extends TestBackupBase {
       String backupIdIncMultiple2 = client.backupTables(request);
       assertTrue(checkSucceeded(backupIdIncMultiple2));
 
-      // #4 - restore full backup for all tables, without overwrite
+      // #4 - restore full backup for all tables
       TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
 
       TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
 
      LOG.debug("Restoring full " + backupIdFull);
       client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
-        tablesRestoreFull, tablesMapFull, false));
+        tablesRestoreFull, tablesMapFull, true));
 
       // #5.1 - check tables for full restore
       HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 3b04c0b00b3..a403455d6d9 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -27,6 +27,7 @@ import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URLDecoder;
 import java.net.URLEncoder;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -251,6 +252,9 @@ public class HFileOutputFormat2
         byte[] tableNameBytes = null;
         if (writeMultipleTables) {
           tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get());
+          tableNameBytes =
+              TableName.valueOf(tableNameBytes).getNameWithNamespaceInclAsString()
+                  .getBytes(Charset.defaultCharset());
           if (!allTableNames.contains(Bytes.toString(tableNameBytes))) {
             throw new IllegalArgumentException("TableName '" + Bytes.toString(tableNameBytes) +
                 "' not" + " expected");
@@ -639,7 +643,10 @@ public class HFileOutputFormat2
     for( TableInfo tableInfo : multiTableInfo )
     {
       regionLocators.add(tableInfo.getRegionLocator());
-      allTableNames.add(tableInfo.getRegionLocator().getName().getNameAsString());
+      String tn = writeMultipleTables?
+          tableInfo.getRegionLocator().getName().getNameWithNamespaceInclAsString():
+          tableInfo.getRegionLocator().getName().getNameAsString();
+      allTableNames.add(tn);
       tableDescriptors.add(tableInfo.getTableDescriptor());
     }
     // Record tablenames for creating writer by favored nodes, and decoding compression, block size and other attributes of columnfamily per table
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 710a94c89c6..09444acc4ed 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -627,15 +627,19 @@ public class TestHFileOutputFormat2 {
     Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
     // Generate the bulk load files
     runIncrementalPELoad(conf, tableInfo, testDir, putSortReducer);
+    if (writeMultipleTables) {
+      testDir = new Path(testDir, "default");
+    }
     for (Table tableSingle : allTables.values()) {
       // This doesn't write into the table, just makes files
       assertEquals("HFOF should not touch actual table", 0,
           util.countRows(tableSingle));
     }
     int numTableDirs = 0;
-    for (FileStatus tf : testDir.getFileSystem(conf).listStatus(testDir)) {
+    FileStatus[] fss =
+        testDir.getFileSystem(conf).listStatus(testDir);
+    for (FileStatus tf: fss) {
       Path tablePath = testDir;
-
       if (writeMultipleTables) {
         if (allTables.containsKey(tf.getPath().getName())) {
           ++numTableDirs;
@@ -648,7 +652,8 @@ public class TestHFileOutputFormat2 {
 
       // Make sure that a directory was created for every CF
       int dir = 0;
-      for (FileStatus f : tablePath.getFileSystem(conf).listStatus(tablePath)) {
+      fss = tablePath.getFileSystem(conf).listStatus(tablePath);
+      for (FileStatus f: fss) {
         for (byte[] family : FAMILIES) {
           if (Bytes.toString(family).equals(f.getPath().getName())) {
             ++dir;