mirror of https://github.com/apache/druid.git
fixed a syntax error in the CREATE INDEX statements; added a test for MetadataConnector
parent 3cc1b2e690
commit df7566701d
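For context on the hunks below: a statement submitted through JDBC must be a single statement without a trailing semicolon, and Derby in particular rejects the terminator with a syntax error. A minimal, standalone demonstration (not part of this commit; it assumes the Derby embedded driver is on the classpath):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class TrailingSemicolonDemo
{
  public static void main(String[] args) throws Exception
  {
    // in-memory Derby database, created on first connect
    Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true");
    Statement stmt = conn.createStatement();
    stmt.executeUpdate("CREATE TABLE t (id VARCHAR(255) NOT NULL)");
    stmt.executeUpdate("CREATE INDEX idx_t_id ON t(id)"); // fine: no terminator
    try {
      stmt.executeUpdate("CREATE INDEX idx_t_id2 ON t(id);"); // Derby rejects the trailing ';'
    }
    catch (SQLException e) {
      System.out.println("trailing semicolon rejected: " + e.getMessage());
    }
  }
}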
@@ -130,8 +130,8 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
                 + ")",
                 tableName, getPayloadType()
             ),
-            String.format("CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource);", tableName),
-            String.format("CREATE INDEX idx_%1$s_used ON %1$s(used);", tableName)
+            String.format("CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource)", tableName),
+            String.format("CREATE INDEX idx_%1$s_used ON %1$s(used)", tableName)
         )
     );
   }
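Note that both the old and new statements lean on java.util.Formatter's positional syntax: %1$s refers back to the first argument at every occurrence, so tableName is spliced into both the index name and the table reference. A small illustration:

// %1$s reuses the first format argument wherever it appears
public class PositionalFormatDemo
{
  public static void main(String[] args)
  {
    System.out.println(String.format("CREATE INDEX idx_%1$s_used ON %1$s(used)", "druid_segments"));
    // prints: CREATE INDEX idx_druid_segments_used ON druid_segments(used)
  }
}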
@@ -152,7 +152,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
                 + ")",
                 tableName, getPayloadType()
             ),
-            String.format("CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource);", tableName)
+            String.format("CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource)", tableName)
         )
     );
   }
@@ -193,7 +193,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
                 + ")",
                 tableName, getPayloadType()
             ),
-            String.format("CREATE INDEX idx_%1$s_active_created_date ON %1$s(active, created_date);", tableName)
+            String.format("CREATE INDEX idx_%1$s_active_created_date ON %1$s(active, created_date)", tableName)
         )
     );
   }
@@ -213,7 +213,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
                 + ")",
                 tableName, getSerialType(), getPayloadType()
             ),
-            String.format("CREATE INDEX idx_%1$s_task_id ON %1$s(task_id);", tableName)
+            String.format("CREATE INDEX idx_%1$s_task_id ON %1$s(task_id)", tableName)
         )
     );
   }
@@ -233,7 +233,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
                 + ")",
                 tableName, getSerialType(), getPayloadType()
             ),
-            String.format("CREATE INDEX idx_%1$s_task_id ON %1$s(task_id);", tableName)
+            String.format("CREATE INDEX idx_%1$s_task_id ON %1$s(task_id)", tableName)
         )
     );
   }
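The createTable plumbing around these statements is not shown in this diff. A rough sketch of how a list of DDL strings like the ones above is typically executed with JDBI v2 (an assumption for illustration, not the connector's actual code):

import java.util.List;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class DdlSketch
{
  // runs each DDL string as its own JDBC statement -- hence no trailing ';'
  public static void runDdl(DBI dbi, final List<String> statements)
  {
    dbi.withHandle(
        new HandleCallback<Void>()
        {
          @Override
          public Void withHandle(Handle handle) throws Exception
          {
            for (String sql : statements) {
              handle.execute(sql);
            }
            return null;
          }
        }
    );
  }
}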
io/druid/db/SQLMetadataConnectorTest.java (new file)
@@ -0,0 +1,170 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013  Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
package io.druid.db;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Suppliers;
import com.google.common.base.Throwables;
import io.druid.jackson.DefaultObjectMapper;
import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.skife.jdbi.v2.Batch;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

import java.util.Arrays;
import java.util.LinkedList;

public class SQLMetadataConnectorTest
{
  private static final ObjectMapper jsonMapper = new DefaultObjectMapper();
  private DerbyConnector connector;
  private DBI dbi;
  private MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
  private String TABLE_NAME = tablesConfig.getSegmentsTable();

  @Before
  public void setUp()
  {
    MetadataStorageConnectorConfig config = jsonReadWriteRead(
        "{"
        + "\"type\" : \"db\",\n"
        + "\"segmentTable\" : \"segments\"\n"
        + "}",
        MetadataStorageConnectorConfig.class
    );

    connector = new DerbyConnector(
        Suppliers.ofInstance(config),
        Suppliers.ofInstance(tablesConfig)
    );
    dbi = connector.getDBI();
  }

  @Test
  public void testCreateTables()
  {
    final LinkedList<String> tables = new LinkedList<String>();
    tables.add(tablesConfig.getConfigTable());
    tables.add(tablesConfig.getSegmentsTable());
    tables.add(tablesConfig.getRulesTable());
    tables.add(tablesConfig.getTaskLockTable());
    tables.add(tablesConfig.getTaskLogTable());
    tables.add(tablesConfig.getTasksTable());
    try {
      connector.createSegmentTable();
      connector.createConfigTable();
      connector.createRulesTable();
      connector.createTaskTables();

      dbi.withHandle(
          new HandleCallback<Void>()
          {
            @Override
            public Void withHandle(Handle handle) throws Exception
            {
              for (String table : tables) {
                Assert.assertTrue(String.format("table %s was not created!", table), connector.tableExists(handle, table));
              }

              return null;
            }
          }
      );
    }
    finally {
      dbi.withHandle(
          new HandleCallback<Void>()
          {
            @Override
            public Void withHandle(Handle handle) throws Exception
            {
              final Batch batch = handle.createBatch();
              for (String table : tables) {
                batch.add(String.format("DROP TABLE %s", table));
              }
              batch.execute();
              return null;
            }
          }
      );
    }
  }

  @Test
  public void testInsertOrUpdate()
  {
    try {
      connector.createSegmentTable(dbi, TABLE_NAME);
      connector.insertOrUpdate(TABLE_NAME, "dummy1", "dummy2", "emperor", "penguin".getBytes());
      dbi.withHandle(
          new HandleCallback<Void>()
          {
            @Override
            public Void withHandle(Handle handle) throws Exception
            {
              // compare contents, not references
              Assert.assertTrue(
                  Arrays.equals("penguin".getBytes(), connector.lookup(TABLE_NAME, "dummy1", "dummy2", "emperor"))
              );
              return null;
            }
          }
      );

      connector.insertOrUpdate(TABLE_NAME, "dummy1", "dummy2", "emperor", "penguin chick".getBytes());

      dbi.withHandle(
          new HandleCallback<Void>()
          {
            @Override
            public Void withHandle(Handle handle) throws Exception
            {
              Assert.assertTrue(
                  Arrays.equals("penguin chick".getBytes(), connector.lookup(TABLE_NAME, "dummy1", "dummy2", "emperor"))
              );
              return null;
            }
          }
      );
    }
    catch (Exception e) {
      // a silently swallowed exception would pass the test; propagate it instead
      throw Throwables.propagate(e);
    }
    finally {
      dbi.withHandle(
          new HandleCallback<Void>()
          {
            @Override
            public Void withHandle(Handle handle) throws Exception
            {
              handle.createStatement(String.format("DROP TABLE %s", TABLE_NAME))
                    .execute();
              return null;
            }
          }
      );
    }
  }

  private <T> T jsonReadWriteRead(String s, Class<T> klass)
  {
    try {
      return jsonMapper.readValue(jsonMapper.writeValueAsBytes(jsonMapper.readValue(s, klass)), klass);
    }
    catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }
}
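A footnote on the byte[] assertions in testInsertOrUpdate: junit's assertEquals compares arrays by reference (Object.equals), so two distinct arrays with identical contents would fail the assertion, which is why Arrays.equals is used for an element-wise comparison. A self-contained illustration:

public class ByteArrayEqualityDemo
{
  public static void main(String[] args)
  {
    byte[] a = "penguin".getBytes();
    byte[] b = "penguin".getBytes();
    System.out.println(a.equals(b));                   // false: reference identity
    System.out.println(java.util.Arrays.equals(a, b)); // true: element-wise comparison
  }
}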