HDFS-11802. Ozone : add DEBUG CLI support for open container db file. Contributed by Chen Liang

Chen Liang authored on 2017-05-12 13:13:55 -07:00, committed by Owen O'Malley
parent edff6c6a24
commit 9bc494b909
2 changed files with 97 additions and 6 deletions

SQLCLI.java

@@ -53,6 +53,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
  * This is the CLI that can be used to convert a LevelDB into a sqlite DB file.
@@ -116,6 +117,14 @@ public class SQLCLI extends Configured implements Tool {
       "INSERT INTO nodePool (datanodeUUID, poolName) " +
           "VALUES (\"%s\", \"%s\")";
   // and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
+  // for openContainer.db
+  private static final String CREATE_OPEN_CONTAINER =
+      "CREATE TABLE openContainer (" +
+          "containerName TEXT PRIMARY KEY NOT NULL, " +
+          "containerUsed INTEGER NOT NULL)";
+  private static final String INSERT_OPEN_CONTAINER =
+      "INSERT INTO openContainer (containerName, containerUsed) " +
+          "VALUES (\"%s\", \"%s\")";
 
   private static final Logger LOG =
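Note that both format specifiers in INSERT_OPEN_CONTAINER are quoted "%s", so the used-bytes value is written as a quoted literal; SQLite's INTEGER column affinity coerces it back to a number on insert. A minimal sketch of one rendered statement (the container name and size are hypothetical, not taken from the commit):

    // Hypothetical values, for illustration only.
    String sql = String.format(INSERT_OPEN_CONTAINER, "container-0001", 1024L);
    // sql now holds:
    //   INSERT INTO openContainer (containerName, containerUsed)
    //       VALUES ("container-0001", "1024")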
@@ -191,6 +200,9 @@ public int run(String[] args) throws Exception {
     } else if (dbName.toString().equals(NODEPOOL_DB)) {
       LOG.info("Converting node pool DB");
       convertNodePoolDB(dbPath, outPath);
+    } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
+      LOG.info("Converting open container DB");
+      convertOpenContainerDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
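The converter is selected purely by the name of the db file passed in. A sketch of a programmatic invocation, using the -p/-o flags exercised by the test further down (paths hypothetical, and the no-arg construction is schematic):

    // Assumes the input LevelDB is named after OPEN_CONTAINERS_DB;
    // "openContainers.db" is an assumed value of that constant.
    String[] args = {"-p", "/data/ozone/metadata/openContainers.db",
                     "-o", "/tmp/openContainer_sql.db"};
    new SQLCLI().run(args);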
@@ -244,12 +256,12 @@ private void convertContainerDB(Path dbPath, Path outPath)
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-        Connection conn = connectDB(outPath.toString())) {
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
       executeSQL(conn, CREATE_CONTAINER_MEMBERS);
       executeSQL(conn, CREATE_DATANODE_INFO);
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       HashSet<String> uuidChecked = new HashSet<>();
       while (iter.hasNext()) {
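The change in this hunk, repeated in the two conversion methods below, moves the DBIterator into the try-with-resources header. org.iq80.leveldb.DBIterator implements Closeable, so in the old form the iterator was never closed when the body threw. Schematically:

    // All three resources are now closed automatically, in reverse
    // declaration order, on both normal and exceptional exit.
    try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
        Connection conn = connectDB(outPath.toString());
        DBIterator iter = dbStore.getIterator()) {
      // ... iterate the LevelDB entries and insert sqlite rows ...
    }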
@@ -320,10 +332,10 @@ private void convertBlockDB(Path dbPath, Path outPath) throws Exception {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-        Connection conn = connectDB(outPath.toString())) {
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_BLOCK_CONTAINER);
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       while (iter.hasNext()) {
         Map.Entry<byte[], byte[]> entry = iter.next();
@@ -364,11 +376,11 @@ private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception {
     File dbFile = dbPath.toFile();
     org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
     try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
-        Connection conn = connectDB(outPath.toString())) {
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
       executeSQL(conn, CREATE_NODE_POOL);
       executeSQL(conn, CREATE_DATANODE_INFO);
-      DBIterator iter = dbStore.getIterator();
       iter.seekToFirst();
       while (iter.hasNext()) {
         Map.Entry<byte[], byte[]> entry = iter.next();
@@ -394,6 +406,42 @@ private void insertNodePoolDB(Connection conn, String blockPool,
     executeSQL(conn, insertDatanodeID);
   }
 
+  /**
+   * Convert openContainer.db to sqlite db file. This is a rather simple db;
+   * the schema has only one table:
+   *
+   * openContainer
+   * -------------------------------
+   * containerName* | containerUsed
+   * -------------------------------
+   *
+   * @param dbPath path to the open container db.
+   * @param outPath path to the output sqlite file.
+   * @throws Exception throws exception.
+   */
+  private void convertOpenContainerDB(Path dbPath, Path outPath)
+      throws Exception {
+    LOG.info("Create table for open container db.");
+    File dbFile = dbPath.toFile();
+    org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
+    try (LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
+        Connection conn = connectDB(outPath.toString());
+        DBIterator iter = dbStore.getIterator()) {
+      executeSQL(conn, CREATE_OPEN_CONTAINER);
+      iter.seekToFirst();
+      while (iter.hasNext()) {
+        Map.Entry<byte[], byte[]> entry = iter.next();
+        String containerName = DFSUtil.bytes2String(entry.getKey());
+        long containerUsed = Long.parseLong(
+            DFSUtil.bytes2String(entry.getValue()));
+        String insertOpenContainer = String.format(
+            INSERT_OPEN_CONTAINER, containerName, containerUsed);
+        executeSQL(conn, insertOpenContainer);
+      }
+    }
+  }
+
   private CommandLine parseArgs(String[] argv)
       throws ParseException {
     return parser.parse(options, argv);
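The output is an ordinary sqlite file, so it can be inspected with any sqlite client. A minimal readback sketch over JDBC (assuming the sqlite-jdbc driver is on the classpath; the output path is hypothetical, and java.sql.DriverManager/Statement/ResultSet imports are implied):

    try (Connection conn = DriverManager.getConnection(
             "jdbc:sqlite:/tmp/openContainer_sql.db");
        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT * FROM openContainer")) {
      while (rs.next()) {
        // one row per open container, per the CREATE_OPEN_CONTAINER schema
        System.out.println(rs.getString("containerName") + " uses "
            + rs.getLong("containerUsed") + " bytes");
      }
    }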

TestContainerSQLCli.java

@@ -46,11 +46,13 @@
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.KB;
 import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -103,6 +105,15 @@ public static void init() throws Exception {
     // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE which we set to 2.
     // so the first allocateBlock() will create two containers. A random one
     // is assigned for the block.
+
+    // loop until both datanodes are up; try for up to about 4 seconds.
+    for (int c = 0; c < 40; c++) {
+      if (nodeManager.getAllNodes().size() == 2) {
+        break;
+      }
+      Thread.sleep(100);
+    }
+    assertEquals(2, nodeManager.getAllNodes().size());
     AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
     pipeline1 = ab1.getPipeline();
     blockContainerMap.put(ab1.getKey(), pipeline1.getContainerName());
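The same wait could be written more idiomatically with Hadoop's GenericTestUtils.waitFor; a sketch, assuming org.apache.hadoop.test.GenericTestUtils is available on this test's classpath:

    // Poll every 100 ms, give up after 4000 ms. Equivalent to the loop
    // above, except a timeout throws instead of reaching the assert.
    GenericTestUtils.waitFor(() -> nodeManager.getAllNodes().size() == 2,
        100, 4000);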
@@ -184,6 +195,38 @@ public void testConvertNodepoolDB() throws Exception {
     Files.delete(Paths.get(dbOutPath));
   }
 
+  @Test
+  public void testConvertOpenContainerDB() throws Exception {
+    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
+    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+    String dbPath = dbRootPath + "/" + OPEN_CONTAINERS_DB;
+    String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+    cli.run(args);
+
+    Connection conn = connectDB(dbOutPath);
+    String sql = "SELECT * FROM openContainer";
+    ResultSet rs = executeQuery(conn, sql);
+    HashSet<String> expectedContainer = new HashSet<>();
+    expectedContainer.add(pipeline1.getContainerName());
+    expectedContainer.add(pipeline2.getContainerName());
+    // the number of allocated blocks can vary, and they can be located
+    // in either of the two containers. We only check that the total used
+    // space equals block size * the number of blocks.
+    long totalUsed = 0;
+    while (rs.next()) {
+      String containerName = rs.getString("containerName");
+      long containerUsed = rs.getLong("containerUsed");
+      totalUsed += containerUsed;
+      assertTrue(expectedContainer.remove(containerName));
+    }
+    assertEquals(0, expectedContainer.size());
+    assertEquals(blockContainerMap.keySet().size() * DEFAULT_BLOCK_SIZE,
+        totalUsed);
+
+    Files.delete(Paths.get(dbOutPath));
+  }
+
   @Test
   public void testConvertContainerDB() throws Exception {
     String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";