workaround for Derby requiring batch statements

+ make sure payloads are always written as bytes
This commit is contained in:
Xavier Léauté 2014-10-29 13:59:26 -07:00
parent 55d5f1f618
commit 1f171a2b86
4 changed files with 89 additions and 89 deletions

View File

@ -191,7 +191,7 @@ public class IndexerSQLMetadataStorageCoordinator implements IndexerMetadataStor
.bind("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? 0 : 1)
.bind("version", segment.getVersion())
.bind("used", true)
.bind("payload", jsonMapper.writeValueAsString(segment))
.bind("payload", jsonMapper.writeValueAsBytes(segment))
.execute();
log.info("Published segment [%s] to DB", segment.getIdentifier());
@ -277,7 +277,7 @@ public class IndexerSQLMetadataStorageCoordinator implements IndexerMetadataStor
String.format("UPDATE %s SET payload = :payload WHERE id = :id", dbTables.getSegmentsTable())
)
.bind("id", segment.getIdentifier())
.bind("payload", jsonMapper.writeValueAsString(segment))
.bind("payload", jsonMapper.writeValueAsBytes(segment))
.execute();
}
catch (IOException e) {

View File

@ -20,9 +20,11 @@
package io.druid.db;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.metamx.common.ISE;
import com.metamx.common.logger.Logger;
import org.apache.commons.dbcp.BasicDataSource;
import org.skife.jdbi.v2.Batch;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.IDBI;
@ -75,7 +77,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
protected abstract boolean tableExists(Handle handle, final String tableName);
public void createTable(final IDBI dbi, final String tableName, final String sql)
public void createTable(final IDBI dbi, final String tableName, final List<String> sql)
{
try {
dbi.withHandle(
@ -86,7 +88,11 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
{
if (!tableExists(handle, tableName)) {
log.info("Creating table[%s]", tableName);
handle.createStatement(sql).execute();
final Batch batch = handle.createBatch();
for(String s : sql) {
batch.add(s);
}
batch.execute();
} else {
log.info("Table[%s] already exists", tableName);
}
@ -100,28 +106,12 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
}
}
/**
 * Idempotently creates an index on {@code tableName(columnName)}.
 *
 * <p>NOTE(review): the existence probe queries {@code SYS.SYSCONGLOMERATES}, a
 * Derby-specific system catalog — presumably this connector only targets Derby
 * here; confirm before reusing with another metadata store.
 *
 * @param dbi        JDBI instance to run against
 * @param tableName  table to index
 * @param indexName  name of the index to create if absent
 * @param columnName column to index
 */
public void createIndex(final IDBI dbi, final String tableName, final String indexName, final String columnName) {
  dbi.withHandle(
      new HandleCallback<Void>()
      {
        @Override
        public Void withHandle(Handle handle) throws Exception
        {
          // Bug fix: the previous check selected the *table* name from SYS.SYSTABLES and
          // created the index only when that result was empty — i.e. only when the target
          // table did NOT exist. Probe for the index itself instead; Derby stores index
          // (conglomerate) identifiers upper-cased, hence toUpperCase().
          List<Map<String, Object>> existing = handle.select(
              String.format(
                  "select * from SYS.SYSCONGLOMERATES where conglomeratename = \'%s\'",
                  indexName.toUpperCase()
              )
          );
          if (existing.isEmpty()) {
            // Identifiers come from trusted internal configuration, so String.format is
            // acceptable here (DDL cannot be parameterized with bind variables anyway).
            handle.createStatement(String.format("CREATE INDEX %1$s ON %2$s(%3$s)", indexName, tableName, columnName)).execute();
          }
          return null;
        }
      }
  );
}
public void createSegmentTable(final IDBI dbi, final String tableName)
{
createTable(
dbi,
tableName,
ImmutableList.of(
String.format(
"CREATE TABLE %1$s (\n"
+ " id VARCHAR(255) NOT NULL,\n"
@ -134,10 +124,11 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
+ " used BOOLEAN NOT NULL,\n"
+ " payload %2$s NOT NULL,\n"
+ " PRIMARY KEY (id)\n"
+ ");\n"
+ "CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource);\n"
+ "CREATE INDEX idx_%1$s_used ON %1$s(used);",
+ ")",
tableName, getPayloadType()
),
String.format("CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource);", tableName),
String.format("CREATE INDEX idx_%1$s_used ON %1$s(used);", tableName)
)
);
}
@ -147,6 +138,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
createTable(
dbi,
tableName,
ImmutableList.of(
String.format(
"CREATE TABLE %1$s (\n"
+ " id VARCHAR(255) NOT NULL,\n"
@ -154,12 +146,12 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
+ " version VARCHAR(255) NOT NULL,\n"
+ " payload %2$s NOT NULL,\n"
+ " PRIMARY KEY (id)\n"
+ ");\n"
+ "CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource);",
+ ")",
tableName, getPayloadType()
),
String.format("CREATE INDEX idx_%1$s_datasource ON %1$s(dataSource);", tableName)
)
);
createIndex(dbi, tableName, "rules_dataSource", "dataSource");
}
public void createConfigTable(final IDBI dbi, final String tableName)
@ -167,14 +159,16 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
createTable(
dbi,
tableName,
ImmutableList.of(
String.format(
"CREATE TABLE %1$s (\n"
+ " name VARCHAR(255) NOT NULL,\n"
+ " payload %2$s NOT NULL,\n"
+ " PRIMARY KEY(name)\n"
+ ");",
+ ")",
tableName, getPayloadType()
)
)
);
}
@ -183,6 +177,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
createTable(
dbi,
tableName,
ImmutableList.of(
String.format(
"CREATE TABLE %1$s (\n"
+ " id VARCHAR(255) NOT NULL,\n"
@ -192,9 +187,10 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
+ " status_payload %2$s NOT NULL,\n"
+ " active BOOLEAN NOT NULL DEFAULT FALSE,\n"
+ " PRIMARY KEY (id)\n"
+ ");\n"
+ "CREATE INDEX idx_%1$s_active_created_date ON %1$s(active, created_date);",
+ ")",
tableName, getPayloadType()
),
String.format("CREATE INDEX idx_%1$s_active_created_date ON %1$s(active, created_date);", tableName)
)
);
}
@ -204,15 +200,17 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
createTable(
dbi,
tableName,
ImmutableList.of(
String.format(
"CREATE TABLE %1$s (\n"
+ " id %2$s NOT NULL,\n"
+ " task_id VARCHAR(255) DEFAULT NULL,\n"
+ " log_payload %3$s,\n"
+ " PRIMARY KEY (id)\n"
+ ");\n"
+ "CREATE INDEX idx_%1$s_task_id ON %1$s(task_id);",
+ ")",
tableName, getSerialType(), getPayloadType()
),
String.format("CREATE INDEX idx_%1$s_task_id ON %1$s(task_id);", tableName)
)
);
}
@ -222,15 +220,17 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
createTable(
dbi,
tableName,
ImmutableList.of(
String.format(
"CREATE TABLE %1$s (\n"
+ " id %2$s NOT NULL,\n"
+ " task_id VARCHAR(255) DEFAULT NULL,\n"
+ " lock_payload %3$s,\n"
+ " PRIMARY KEY (id)\n"
+ ");\n"
+ "CREATE INDEX idx_%1$s_task_id ON %1$s(task_id);",
+ ")",
tableName, getSerialType(), getPayloadType()
),
String.format("CREATE INDEX idx_%1$s_task_id ON %1$s(task_id);", tableName)
)
);
}

View File

@ -109,7 +109,7 @@ public class SQLMetadataRuleManager implements MetadataRuleManager
.bind("id", String.format("%s_%s", defaultDatasourceName, version))
.bind("dataSource", defaultDatasourceName)
.bind("version", version)
.bind("payload", jsonMapper.writeValueAsString(defaultRules))
.bind("payload", jsonMapper.writeValueAsBytes(defaultRules))
.execute();
return null;
@ -320,7 +320,7 @@ public class SQLMetadataRuleManager implements MetadataRuleManager
.bind("id", String.format("%s_%s", dataSource, version))
.bind("dataSource", dataSource)
.bind("version", version)
.bind("payload", jsonMapper.writeValueAsString(newRules))
.bind("payload", jsonMapper.writeValueAsBytes(newRules))
.execute();
return null;

View File

@ -101,7 +101,7 @@ public class SQLMetadataSegmentPublisher implements SegmentPublisher
.bind("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? 0 : 1)
.bind("version", segment.getVersion())
.bind("used", true)
.bind("payload", jsonMapper.writeValueAsString(segment))
.bind("payload", jsonMapper.writeValueAsBytes(segment))
.execute();
return null;