mirror of https://github.com/apache/druid.git
Migrate TestDerbyConnector to a JUnit @Rule
This commit is contained in:
parent fdb6a6651b
commit e38cf54bc8
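
For orientation before the diff: the commit swaps hand-constructed TestDerbyConnector instances for a JUnit 4 @Rule that provisions a uniquely named in-memory Derby database before each test and drops it afterwards. Below is a minimal sketch of how a test consumes the rule, relying only on the API introduced in this commit (getConnector(), metadataTablesConfigSupplier(), getJdbcUri(), createSegmentTable()); the example class name and assertions are illustrative, not part of the commit.

import io.druid.metadata.MetadataStorageTablesConfig;
import io.druid.metadata.TestDerbyConnector;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;

public class DerbyConnectorRuleUsageExample
{
  // One in-memory Derby instance per test method; dropped again in after().
  @Rule
  public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule =
      new TestDerbyConnector.DerbyConnectorRule();

  @Test
  public void testCreatesIsolatedDatabase()
  {
    final TestDerbyConnector connector = derbyConnectorRule.getConnector();
    final MetadataStorageTablesConfig tablesConfig =
        derbyConnectorRule.metadataTablesConfigSupplier().get();

    // Each test sees a uniquely named database, so tests no longer share state.
    Assert.assertTrue(connector.getJdbcUri().startsWith("jdbc:derby:memory:druidTest"));

    // Tables are created on demand against the per-test database.
    connector.createSegmentTable(connector.getDBI(), tablesConfig.getSegmentsTable());
  }
}
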
@@ -19,7 +19,6 @@
package io.druid.indexer.updater;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
@@ -81,7 +80,6 @@ import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
@@ -92,95 +90,14 @@ public class HadoopConverterJobTest
{
@Rule
public final TemporaryFolder temporaryFolder = new TemporaryFolder();

@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();
private String storageLocProperty = null;
private File tmpSegmentDir = null;

private static final String DATASOURCE = "testDatasource";
private static final String STORAGE_PROPERTY_KEY = "druid.storage.storageDirectory";
private final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpc = new MetadataStorageUpdaterJobSpec()
{
@Override
@JsonProperty
public String getSegmentTable()
{
return "druid_segments";
}

@Override
@JsonProperty
public String getType()
{
return "derby";
}

@JsonProperty
public String getConnectURI()
{
return "jdbc:derby:memory:druidTest;create=true";
}

@JsonProperty
public String getUser()
{
return "sb";
}

@JsonProperty
public String getPassword()
{
return "sb";
}

@Override
public MetadataStorageConnectorConfig get()
{
return new MetadataStorageConnectorConfig()
{

public boolean isCreateTables()
{
return true;
}

public String getHost()
{
return "localhost";
}

public int getPort()
{
return -1;
}

public String getConnectURI()
{
return "jdbc:derby:memory:druidTest;create=true";
}

public String getUser()
{
return "sb";
}

public String getPassword()
{
return "sb";
}

@Override
public String toString()
{
return "DbConnectorConfig{" +
"createTables=" + isCreateTables() +
", connectURI='" + getConnectURI() + '\'' +
", user='" + getUser() + '\'' +
", passwordProvider=" + getPassword() +
'}';
}

};
}
};

private Supplier<MetadataStorageTablesConfig> metadataStorageTablesConfigSupplier;
private DerbyConnector connector;
@@ -201,6 +118,20 @@ public class HadoopConverterJobTest
@Before
public void setUp() throws Exception
{
final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpec = new MetadataStorageUpdaterJobSpec()
{
@Override
public String getSegmentTable()
{
return derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
}

@Override
public MetadataStorageConnectorConfig get()
{
return derbyConnectorRule.getMetadataConnectorConfig();
}
};
final File scratchFileDir = temporaryFolder.newFolder();
storageLocProperty = System.getProperty(STORAGE_PROPERTY_KEY);
tmpSegmentDir = temporaryFolder.newFolder();
@@ -249,7 +180,7 @@ public class HadoopConverterJobTest
"type", "static",
"paths", tmpInputFile.getAbsolutePath()
),
metadataStorageUpdaterJobSpc,
metadataStorageUpdaterJobSpec,
tmpSegmentDir.getAbsolutePath()
),
new HadoopTuningConfig(
@@ -273,34 +204,8 @@ public class HadoopConverterJobTest
)
)
);
metadataStorageTablesConfigSupplier =
new Supplier<MetadataStorageTablesConfig>()
{
@Override
public MetadataStorageTablesConfig get()
{
return MetadataStorageTablesConfig.fromBase("druid");
}
};
connector = new TestDerbyConnector(
new Supplier<MetadataStorageConnectorConfig>()
{
@Override
public MetadataStorageConnectorConfig get()
{
return metadataStorageUpdaterJobSpc.get();
}
},
new Supplier<MetadataStorageTablesConfig>()
{

@Override
public MetadataStorageTablesConfig get()
{
return new MetadataStorageTablesConfig(null, null, null, null, null, null, null, null);
}
}
);
metadataStorageTablesConfigSupplier = derbyConnectorRule.metadataTablesConfigSupplier();
connector = derbyConnectorRule.getConnector();
try {
connector.getDBI().withHandle(
new HandleCallback<Void>()
@@ -313,7 +218,8 @@ public class HadoopConverterJobTest
}
}
);
} catch (CallbackFailedException e){
}
catch (CallbackFailedException e) {
// Who cares
}
List<Jobby> jobs = ImmutableList.of(
@@ -322,7 +228,7 @@ public class HadoopConverterJobTest
@Override
public boolean run()
{
connector.createSegmentTable(connector.getDBI(), "druid_segments");
connector.createSegmentTable(connector.getDBI(), metadataStorageUpdaterJobSpec.getSegmentTable());
return true;
}
},

@@ -26,7 +26,6 @@ import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Suppliers;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
@@ -74,8 +73,6 @@ import io.druid.indexing.common.task.TaskResource;
import io.druid.indexing.overlord.config.TaskQueueConfig;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.metadata.IndexerSQLMetadataStorageCoordinator;
import io.druid.metadata.MetadataStorageConnectorConfig;
import io.druid.metadata.MetadataStorageTablesConfig;
import io.druid.metadata.SQLMetadataStorageActionHandlerFactory;
import io.druid.metadata.TestDerbyConnector;
import io.druid.query.QueryRunnerFactoryConglomerate;
@@ -165,6 +162,9 @@ public class TaskLifecycleTest
IR("2010-01-02T01", "a", "c", 1)
);

@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();

private final String taskStorageType;

private ObjectMapper mapper;
@@ -343,15 +343,7 @@ public class TaskLifecycleTest
}
);
} else if (taskStorageType.equals("MetadataTaskStorage")) {
MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
testDerbyConnector = new TestDerbyConnector(
Suppliers.ofInstance(
new MetadataStorageConnectorConfig()
),
Suppliers.ofInstance(
tablesConfig
)
);
testDerbyConnector = derbyConnectorRule.getConnector();
mapper = new DefaultObjectMapper();
mapper.registerSubtypes(
new NamedType(MockExceptionalFirehoseFactory.class, "mockExcepFirehoseFactory"),
@@ -363,7 +355,7 @@ public class TaskLifecycleTest
ts = new MetadataTaskStorage(
testDerbyConnector,
new TaskStorageConfig(null),
new SQLMetadataStorageActionHandlerFactory(testDerbyConnector, tablesConfig, mapper)
new SQLMetadataStorageActionHandlerFactory(testDerbyConnector, derbyConnectorRule.metadataTablesConfigSupplier().get(), mapper)
);
} else {
throw new RuntimeException(String.format("Unknown task storage type [%s]", taskStorageType));
@@ -470,9 +462,6 @@ public class TaskLifecycleTest
public void tearDown()
{
tq.stop();
if (testDerbyConnector != null) {
testDerbyConnector.tearDown();
}
}

@Test

@@ -20,7 +20,6 @@
package io.druid.metadata;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -28,9 +27,9 @@ import io.druid.jackson.DefaultObjectMapper;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.LinearShardSpec;
import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;
@@ -40,12 +39,8 @@ import java.util.Set;

public class IndexerSQLMetadataStorageCoordinatorTest
{
private final MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
private final TestDerbyConnector derbyConnector = new TestDerbyConnector(
Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
Suppliers.ofInstance(tablesConfig)
);
@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();
private final ObjectMapper mapper = new DefaultObjectMapper();
private final DataSegment defaultSegment = new DataSegment(
"dataSource",
@@ -72,26 +67,22 @@ public class IndexerSQLMetadataStorageCoordinatorTest
);
private final Set<DataSegment> segments = ImmutableSet.of(defaultSegment, defaultSegment2);
IndexerSQLMetadataStorageCoordinator coordinator;
private TestDerbyConnector derbyConnector;

@Before
public void setUp()
{
derbyConnector = derbyConnectorRule.getConnector();
mapper.registerSubtypes(LinearShardSpec.class);
derbyConnector.createTaskTables();
derbyConnector.createSegmentTable();
coordinator = new IndexerSQLMetadataStorageCoordinator(
mapper,
tablesConfig,
derbyConnectorRule.metadataTablesConfigSupplier().get(),
derbyConnector
);
}

@After
public void tearDown()
{
derbyConnector.tearDown();
}

private void unUseSegment()
{
for (final DataSegment segment : segments) {
@@ -103,10 +94,11 @@ public class IndexerSQLMetadataStorageCoordinatorTest
public Integer withHandle(Handle handle) throws Exception
{
return handle.createStatement(
String.format("UPDATE %s SET used = false WHERE id = :id", tablesConfig.getSegmentsTable())
)
.bind("id", segment.getIdentifier())
.execute();
String.format(
"UPDATE %s SET used = false WHERE id = :id",
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable()
)
).bind("id", segment.getIdentifier()).execute();
}
}
)
@@ -121,7 +113,7 @@ public class IndexerSQLMetadataStorageCoordinatorTest
Assert.assertArrayEquals(
mapper.writeValueAsString(defaultSegment).getBytes("UTF-8"),
derbyConnector.lookup(
tablesConfig.getSegmentsTable(),
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
"id",
"payload",
defaultSegment.getIdentifier()

@@ -18,7 +18,6 @@
package io.druid.metadata;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
@@ -30,13 +29,16 @@ import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;

public class MetadataSegmentManagerTest
{
@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();

private SQLMetadataSegmentManager manager;
private TestDerbyConnector connector;
private final ObjectMapper jsonMapper = new DefaultObjectMapper();

private final DataSegment segment1 = new DataSegment(
@@ -74,23 +76,18 @@ public class MetadataSegmentManagerTest
@Before
public void setUp() throws Exception
{
final Supplier<MetadataStorageTablesConfig> dbTables = Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase("test"));

connector = new TestDerbyConnector(
Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
dbTables
);
TestDerbyConnector connector = derbyConnectorRule.getConnector();

manager = new SQLMetadataSegmentManager(
jsonMapper,
Suppliers.ofInstance(new MetadataSegmentManagerConfig()),
dbTables,
derbyConnectorRule.metadataTablesConfigSupplier(),
connector
);

SQLMetadataSegmentPublisher publisher = new SQLMetadataSegmentPublisher(
jsonMapper,
dbTables.get(),
derbyConnectorRule.metadataTablesConfigSupplier().get(),
connector
);

@@ -100,12 +97,6 @@ public class MetadataSegmentManagerTest
publisher.publishSegment(segment2);
}

@After
public void tearDown() throws Exception
{
connector.tearDown();
}

@Test
public void testPoll()
{

@@ -19,6 +19,7 @@ package io.druid.metadata;
import com.google.common.base.Suppliers;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;
@@ -28,15 +29,16 @@ import java.util.LinkedList;

public class SQLMetadataConnectorTest
{
@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();

private TestDerbyConnector connector;
private MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
private MetadataStorageTablesConfig tablesConfig;

@Before
public void setUp() throws Exception {
connector = new TestDerbyConnector(
Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
Suppliers.ofInstance(tablesConfig)
);
connector = derbyConnectorRule.getConnector();
tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
}

@Test

@@ -32,9 +32,9 @@ import io.druid.server.coordinator.rules.Rule;
import io.druid.server.metrics.NoopServiceEmitter;
import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.Assert;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

@@ -44,8 +44,10 @@ import java.util.Map;

public class SQLMetadataRuleManagerTest
{
@org.junit.Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();
private TestDerbyConnector connector;
private MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
private MetadataStorageTablesConfig tablesConfig;
private SQLMetadataRuleManager ruleManager;
private AuditManager auditManager;
private final ObjectMapper mapper = new DefaultObjectMapper();
@@ -54,10 +56,8 @@ public class SQLMetadataRuleManagerTest
@Before
public void setUp()
{
connector = new TestDerbyConnector(
Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
Suppliers.ofInstance(tablesConfig)
);
connector = derbyConnectorRule.getConnector();
tablesConfig = derbyConnectorRule.metadataTablesConfigSupplier().get();
connector.createAuditTable();
auditManager = new SQLAuditManager(
connector,
@@ -116,7 +116,7 @@ public class SQLMetadataRuleManagerTest
ruleManager.overrideRule(
"test_dataSource",
rules,
auditInfo
auditInfo
);
// fetch rules from metadata storage
ruleManager.poll();
@@ -127,8 +127,8 @@ public class SQLMetadataRuleManagerTest
List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("test_dataSource", "rules", null);
Assert.assertEquals(1, auditEntries.size());
AuditEntry entry = auditEntries.get(0);
Assert.assertEquals(mapper.writeValueAsString(rules),entry.getPayload());
Assert.assertEquals(auditInfo,entry.getAuditInfo());
Assert.assertEquals(mapper.writeValueAsString(rules), entry.getPayload());
Assert.assertEquals(auditInfo, entry.getAuditInfo());
Assert.assertEquals("test_dataSource", entry.getKey());
}

@@ -20,16 +20,15 @@ package io.druid.metadata;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Optional;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.metamx.common.Pair;
import io.druid.jackson.DefaultObjectMapper;
import org.joda.time.DateTime;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;

import java.util.HashSet;
@@ -37,20 +36,16 @@ import java.util.Map;

public class SQLMetadataStorageActionHandlerTest
{
@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();

private static final ObjectMapper jsonMapper = new DefaultObjectMapper();
private TestDerbyConnector connector;
private MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
private SQLMetadataStorageActionHandler<Map<String, Integer>, Map<String, Integer>, Map<String, String>, Map<String, Integer>> handler;

@Before
public void setUp() throws Exception
{
MetadataStorageConnectorConfig config = new MetadataStorageConnectorConfig();

connector = new TestDerbyConnector(
Suppliers.ofInstance(config),
Suppliers.ofInstance(tablesConfig)
);
TestDerbyConnector connector = derbyConnectorRule.getConnector();

final String entryType = "entry";
final String entryTable = "entries";
@@ -107,12 +102,6 @@ public class SQLMetadataStorageActionHandlerTest
);
}

@After
public void tearDown()
{
connector.tearDown();
}

@Test
public void testEntryAndStatus() throws Exception
{

@@ -18,31 +18,112 @@
package io.druid.metadata;

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import io.druid.metadata.storage.derby.DerbyConnector;
import org.junit.Assert;
import org.junit.rules.ExternalResource;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.exceptions.UnableToObtainConnectionException;

import java.sql.SQLException;
import java.util.UUID;

public class TestDerbyConnector extends DerbyConnector
{
private final String jdbcUri;

public TestDerbyConnector(
Supplier<MetadataStorageConnectorConfig> config,
Supplier<MetadataStorageTablesConfig> dbTables
)
{
super(config, dbTables, new DBI("jdbc:derby:memory:druidTest;create=true"));
this(config, dbTables, "jdbc:derby:memory:druidTest" + dbSafeUUID());
}

protected TestDerbyConnector(
Supplier<MetadataStorageConnectorConfig> config,
Supplier<MetadataStorageTablesConfig> dbTables,
String jdbcUri
)
{
super(config, dbTables, new DBI(jdbcUri + ";create=true"));
this.jdbcUri = jdbcUri;
}

public void tearDown()
{
try {
new DBI("jdbc:derby:memory:druidTest;drop=true").open().close();
} catch(UnableToObtainConnectionException e) {
new DBI(jdbcUri + ";drop=true").open().close();
}
catch (UnableToObtainConnectionException e) {
SQLException cause = (SQLException) e.getCause();
// error code "08006" indicates proper shutdown
Assert.assertEquals("08006", cause.getSQLState());
Assert.assertEquals(String.format("Derby not shutdown: [%s]", cause.toString()), "08006", cause.getSQLState());
}
}

private static String dbSafeUUID()
{
return UUID.randomUUID().toString().replace("-", "");
}

public String getJdbcUri()
{
return jdbcUri;
}

public static class DerbyConnectorRule extends ExternalResource
{
private TestDerbyConnector connector;
private final Supplier<MetadataStorageTablesConfig> dbTables;
private final MetadataStorageConnectorConfig connectorConfig;

public DerbyConnectorRule()
{
this(Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase("druidTest")));
}

public DerbyConnectorRule(
Supplier<MetadataStorageTablesConfig> dbTables
)
{
this.dbTables = dbTables;
this.connectorConfig = new MetadataStorageConnectorConfig()
{
@Override
public String getConnectURI()
{
return connector.getJdbcUri();
}
};
}

@Override
protected void before() throws Throwable
{
connector = new TestDerbyConnector(Suppliers.ofInstance(connectorConfig), dbTables);
connector.getDBI().open().close(); // create db
}

@Override
protected void after()
{
connector.tearDown();
}

public TestDerbyConnector getConnector()
{
return connector;
}

public MetadataStorageConnectorConfig getMetadataConnectorConfig()
{
return connectorConfig;
}

public Supplier<MetadataStorageTablesConfig> metadataTablesConfigSupplier()
{
return dbTables;
}
}
}

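The rule's lifecycle comes from JUnit 4's ExternalResource base class: before() runs ahead of each test method and after() runs once it finishes, whether it passed or failed, which is what lets each test get and then discard its own Derby database. A generic sketch of that pattern follows; the ScratchResource class is hypothetical and only stands in for the DerbyConnectorRule above.

import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExternalResource;

public class ExternalResourceLifecycleExample
{
  // Hypothetical resource standing in for TestDerbyConnector.DerbyConnectorRule.
  static class ScratchResource extends ExternalResource
  {
    private boolean open;

    @Override
    protected void before() throws Throwable
    {
      open = true; // acquire: analogous to creating the in-memory Derby database
    }

    @Override
    protected void after()
    {
      open = false; // release: runs even when the test fails, like tearDown() above
    }

    public boolean isOpen()
    {
      return open;
    }
  }

  @Rule
  public final ScratchResource resource = new ScratchResource();

  @Test
  public void testResourceIsOpenDuringTest()
  {
    Assert.assertTrue(resource.isOpen());
  }
}
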
@@ -18,20 +18,18 @@
package io.druid.server.audit;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Suppliers;
import io.druid.audit.AuditEntry;
import io.druid.audit.AuditInfo;
import io.druid.audit.AuditManager;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.metadata.MetadataStorageConnectorConfig;
import io.druid.metadata.MetadataStorageTablesConfig;
import io.druid.metadata.TestDerbyConnector;
import io.druid.server.metrics.NoopServiceEmitter;
import junit.framework.Assert;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;
@@ -41,8 +39,10 @@ import java.util.List;

public class SQLAuditManagerTest
{
@Rule
public final TestDerbyConnector.DerbyConnectorRule derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();

private TestDerbyConnector connector;
private MetadataStorageTablesConfig tablesConfig = MetadataStorageTablesConfig.fromBase("test");
private AuditManager auditManager;

private final ObjectMapper mapper = new DefaultObjectMapper();
@@ -51,14 +51,11 @@ public class SQLAuditManagerTest
@Before
public void setUp() throws Exception
{
connector = new TestDerbyConnector(
Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
Suppliers.ofInstance(tablesConfig)
);
connector = derbyConnectorRule.getConnector();
connector.createAuditTable();
auditManager = new SQLAuditManager(
connector,
Suppliers.ofInstance(tablesConfig),
derbyConnectorRule.metadataTablesConfigSupplier(),
new NoopServiceEmitter(),
mapper,
new SQLAuditManagerConfig()
@@ -99,7 +96,12 @@ public class SQLAuditManagerTest
new DateTime("2013-01-01T00:00:00Z")
);
auditManager.doAudit(entry);
byte[] payload = connector.lookup(tablesConfig.getAuditTable(), "audit_key", "payload", "testKey");
byte[] payload = connector.lookup(
derbyConnectorRule.metadataTablesConfigSupplier().get().getAuditTable(),
"audit_key",
"payload",
"testKey"
);
AuditEntry dbEntry = mapper.readValue(payload, AuditEntry.class);
Assert.assertEquals(entry, dbEntry);

@@ -136,7 +138,7 @@ public class SQLAuditManagerTest
@After
public void cleanup()
{
dropTable(tablesConfig.getAuditTable());
dropTable(derbyConnectorRule.metadataTablesConfigSupplier().get().getAuditTable());
}

private void dropTable(final String tableName)