public abstract class AbstractConnectorTest
extends Object
implements Testing

An abstract base class for testing SourceConnector implementations using the Debezium EmbeddedEngine
with local file storage.
To use this abstract class, simply create a test class that extends it, and add one or more test methods that
starts the connector using your connector's custom configuration.
Then, your test methods can call consumeRecords(int, Consumer) to consume the specified number
of records (the supplied function gives you a chance to do something with the record).
| Modifier and Type | Class and Description |
|---|---|
protected class |
AbstractConnectorTest.SourceRecords |
Nested classes/interfaces inherited from interface Testing:
Testing.Debug, Testing.Files, Testing.InterruptableFunction, Testing.Network, Testing.Print, Testing.Timer

| Modifier and Type | Field and Description |
|---|---|
private BlockingQueue<org.apache.kafka.connect.source.SourceRecord> |
consumedLines |
protected io.debezium.embedded.EmbeddedEngine |
engine |
private ExecutorService |
executor |
private org.apache.kafka.connect.json.JsonConverter |
keyJsonConverter |
private org.apache.kafka.connect.json.JsonDeserializer |
keyJsonDeserializer |
private CountDownLatch |
latch |
protected org.slf4j.Logger |
logger |
org.junit.rules.TestRule |
logTestName |
protected static String |
NO_MONITORED_TABLES_WARNING |
protected static Path |
OFFSET_STORE_PATH |
protected long |
pollTimeoutInMs |
org.junit.rules.TestRule |
skipTestRule |
private org.apache.kafka.connect.json.JsonConverter |
valueJsonConverter |
private org.apache.kafka.connect.json.JsonDeserializer |
valueJsonDeserializer |
| Constructor and Description |
|---|
AbstractConnectorTest() |
| Modifier and Type | Method and Description |
|---|---|
protected String |
assertBeginTransaction(org.apache.kafka.connect.source.SourceRecord record) |
protected void |
assertConfigurationErrors(org.apache.kafka.common.config.Config config,
Field field) |
protected void |
assertConfigurationErrors(org.apache.kafka.common.config.Config config,
Field field,
int numErrors) |
protected void |
assertConfigurationErrors(org.apache.kafka.common.config.Config config,
Field field,
int minErrorsInclusive,
int maxErrorsInclusive) |
protected void |
assertConnectorIsRunning()
Assert that the connector is currently running.
|
protected void |
assertConnectorNotRunning()
Assert that the connector is NOT currently running.
|
protected void |
assertDelete(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk) |
protected void |
assertEndTransaction(org.apache.kafka.connect.source.SourceRecord record,
String expectedTxId,
long expectedEventCount,
Map<String,Number> expectedPerTableCount) |
protected void |
assertEngineIsRunning()
Assert that there was no exception in engine that would cause its termination.
|
protected void |
assertHasNoSourceQuery(org.apache.kafka.connect.source.SourceRecord record) |
protected void |
assertInsert(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk) |
protected void |
assertKey(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk) |
protected void |
assertNoConfigurationErrors(org.apache.kafka.common.config.Config config,
Field... fields) |
protected void |
assertNoRecordsToConsume()
Assert that there are no records to consume.
|
protected void |
assertOffset(org.apache.kafka.connect.source.SourceRecord record,
Map<String,?> expectedOffset) |
protected void |
assertOffset(org.apache.kafka.connect.source.SourceRecord record,
String offsetField,
Object expectedValue) |
protected void |
assertRecordTransactionMetadata(org.apache.kafka.connect.source.SourceRecord record,
String expectedTxId,
long expectedTotalOrder,
long expectedCollectionOrder) |
private void |
assertSameValue(Object actual,
Object expected) |
protected void |
assertSchemaMatchesStruct(org.apache.kafka.connect.data.SchemaAndValue value)
Assert that the supplied
Struct is valid and its schema
matches that of the supplied schema. |
protected void |
assertSchemaMatchesStruct(org.apache.kafka.connect.data.Struct struct,
org.apache.kafka.connect.data.Schema schema)
Assert that the supplied
Struct is valid and its schema
matches that of the supplied schema. |
protected void |
assertSourceQuery(org.apache.kafka.connect.source.SourceRecord record,
String query) |
protected void |
assertTombstone(org.apache.kafka.connect.source.SourceRecord record) |
protected void |
assertTombstone(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk) |
protected void |
assertUpdate(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk) |
protected void |
assertValueField(org.apache.kafka.connect.source.SourceRecord record,
String fieldPath,
Object expectedValue) |
protected org.apache.kafka.common.config.ConfigValue |
configValue(org.apache.kafka.common.config.Config config,
String fieldName) |
protected int |
consumeAvailableRecords(Consumer<org.apache.kafka.connect.source.SourceRecord> recordConsumer)
Try to consume all of the messages that have already been returned by the connector.
|
protected org.apache.kafka.connect.source.SourceRecord |
consumeRecord()
Consume a single record from the connector.
|
protected int |
consumeRecords(int numberOfRecords)
Try to consume the specified number of records from the connector, and return the actual number of records that were
consumed.
|
protected int |
consumeRecords(int numberOfRecords,
Consumer<org.apache.kafka.connect.source.SourceRecord> recordConsumer)
Try to consume the specified number of records from the connector, calling the given function for each, and return the
actual number of records that were consumed.
|
protected int |
consumeRecords(int numberOfRecords,
int breakAfterNulls,
Consumer<org.apache.kafka.connect.source.SourceRecord> recordConsumer,
boolean assertRecords)
Try to consume the specified number of records from the connector, calling the given function for each, and return the
actual number of records that were consumed.
|
protected AbstractConnectorTest.SourceRecords |
consumeRecordsByTopic(int numRecords)
Try to consume and capture exactly the specified number of records from the connector.
|
protected AbstractConnectorTest.SourceRecords |
consumeRecordsByTopic(int numRecords,
boolean assertRecords)
Try to consume and capture exactly the specified number of records from the connector.
|
protected AbstractConnectorTest.SourceRecords |
consumeRecordsByTopic(int numRecords,
int breakAfterNulls)
Try to consume and capture exactly the specified number of records from the connector.
|
protected void |
debug(org.apache.kafka.connect.source.SourceRecord record) |
protected int |
getMaximumEnqueuedRecordCount()
Get the maximum number of messages that can be obtained from the connector and held in-memory before they are
consumed by test methods using
consumeRecord(), consumeRecords(int), or
consumeRecords(int, Consumer). |
void |
initializeConnectorTestFramework() |
protected io.debezium.embedded.EmbeddedEngine.CompletionCallback |
loggingCompletion()
Create a
EmbeddedEngine.CompletionCallback that logs when the engine fails to start the connector or when the connector
stops running after completing successfully or due to an error |
protected void |
print(org.apache.kafka.connect.source.SourceRecord record) |
protected <T> Map<String,Object> |
readLastCommittedOffset(Configuration config,
Map<String,T> partition)
Utility to read the last committed offset for the specified partition.
|
protected <T> Map<Map<String,T>,Map<String,Object>> |
readLastCommittedOffsets(Configuration config,
Collection<Map<String,T>> partitions)
Utility to read the last committed offsets for the specified partitions.
|
protected void |
setConsumeTimeout(long timeout,
TimeUnit unit)
Set the maximum amount of time that the
consumeRecord(), consumeRecords(int), and
consumeRecords(int, Consumer) methods block while waiting for each record before returning null. |
protected void |
start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass,
Configuration connectorConfig)
Start the connector using the supplied connector configuration, where upon completion the status of the connector is
logged.
|
protected void |
start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass,
Configuration connectorConfig,
io.debezium.embedded.EmbeddedEngine.CompletionCallback callback)
Start the connector using the supplied connector configuration.
|
protected void |
start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass,
Configuration connectorConfig,
io.debezium.embedded.EmbeddedEngine.CompletionCallback callback,
Predicate<org.apache.kafka.connect.source.SourceRecord> isStopRecord)
Start the connector using the supplied connector configuration.
|
protected void |
start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass,
Configuration connectorConfig,
Predicate<org.apache.kafka.connect.source.SourceRecord> isStopRecord)
Start the connector using the supplied connector configuration, where upon completion the status of the connector is
logged.
|
void |
stopConnector()
Stop the connector and block until the connector has completely stopped.
|
void |
stopConnector(BooleanConsumer callback)
Stop the connector, and return whether the connector was successfully stopped.
|
protected void |
validate(org.apache.kafka.connect.source.SourceRecord record)
Validate that a
SourceRecord's key and value can each be converted to a byte[] and then back to an equivalent
SourceRecord. |
protected boolean |
waitForAvailableRecords(long timeout,
TimeUnit unit)
Wait for a maximum amount of time until the first record is available.
|
static void |
waitForSnapshotToBeCompleted(String connector,
String server) |
static void |
waitForStreamingRunning(String connector,
String server) |
static void |
waitForStreamingRunning(String connector,
String server,
String contextName) |
Methods inherited from class java.lang.Object:
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface Testing:
debug, once, once, print, print, printError, printError, printError, resetBeforeEachTest, time, time

public org.junit.rules.TestRule skipTestRule
protected static final Path OFFSET_STORE_PATH
protected static final String NO_MONITORED_TABLES_WARNING
private ExecutorService executor
protected io.debezium.embedded.EmbeddedEngine engine
private BlockingQueue<org.apache.kafka.connect.source.SourceRecord> consumedLines
protected long pollTimeoutInMs
protected final org.slf4j.Logger logger
private CountDownLatch latch
private org.apache.kafka.connect.json.JsonConverter keyJsonConverter
private org.apache.kafka.connect.json.JsonConverter valueJsonConverter
private org.apache.kafka.connect.json.JsonDeserializer keyJsonDeserializer
private org.apache.kafka.connect.json.JsonDeserializer valueJsonDeserializer
public org.junit.rules.TestRule logTestName
public final void initializeConnectorTestFramework()
public final void stopConnector()
public void stopConnector(BooleanConsumer callback)
callback - the function that should be called with whether the connector was successfully stopped; may be null

protected int getMaximumEnqueuedRecordCount()
consumeRecord(), consumeRecords(int), or
consumeRecords(int, Consumer).
By default this method return 100.
protected io.debezium.embedded.EmbeddedEngine.CompletionCallback loggingCompletion()
EmbeddedEngine.CompletionCallback that logs when the engine fails to start the connector or when the connector
stops running after completing successfully or due to an error.
Returns: the EmbeddedEngine.CompletionCallback

protected void start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass, Configuration connectorConfig)
connectorClass - the connector class; may not be null
connectorConfig - the configuration for the connector; may not be null

protected void start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass, Configuration connectorConfig, Predicate<org.apache.kafka.connect.source.SourceRecord> isStopRecord)
connectorClass - the connector class; may not be null
connectorConfig - the configuration for the connector; may not be null
isStopRecord - the function that will be called to determine if the connector should be stopped before processing
this record; may be null if not needed

protected void start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass, Configuration connectorConfig, io.debezium.embedded.EmbeddedEngine.CompletionCallback callback)
connectorClass - the connector class; may not be null
connectorConfig - the configuration for the connector; may not be null
callback - the function that will be called when the engine fails to start the connector or when the connector
stops running after completing successfully or due to an error; may be null

protected void start(Class<? extends org.apache.kafka.connect.source.SourceConnector> connectorClass, Configuration connectorConfig, io.debezium.embedded.EmbeddedEngine.CompletionCallback callback, Predicate<org.apache.kafka.connect.source.SourceRecord> isStopRecord)
connectorClass - the connector class; may not be null
connectorConfig - the configuration for the connector; may not be null
isStopRecord - the function that will be called to determine if the connector should be stopped before processing
this record; may be null if not needed
callback - the function that will be called when the engine fails to start the connector or when the connector
stops running after completing successfully or due to an error; may be null

protected void setConsumeTimeout(long timeout,
TimeUnit unit)
consumeRecord(), consumeRecords(int), and
consumeRecords(int, Consumer) methods block while waiting for each record before returning null.
timeout - the timeout; must be positive
unit - the time unit; may not be null

protected org.apache.kafka.connect.source.SourceRecord consumeRecord()
throws InterruptedException
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected int consumeRecords(int numberOfRecords)
throws InterruptedException
numberOfRecords - the number of records that should be consumed
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected int consumeRecords(int numberOfRecords,
int breakAfterNulls,
Consumer<org.apache.kafka.connect.source.SourceRecord> recordConsumer,
boolean assertRecords)
throws InterruptedException
numberOfRecords - the number of records that should be consumed
breakAfterNulls - the number of allowed runs when no records are received
recordConsumer - the function that should be called with each consumed record
assertRecords - true if records serialization should be verified
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected int consumeRecords(int numberOfRecords,
Consumer<org.apache.kafka.connect.source.SourceRecord> recordConsumer)
throws InterruptedException
numberOfRecords - the number of records that should be consumed
recordConsumer - the function that should be called with each consumed record
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected AbstractConnectorTest.SourceRecords consumeRecordsByTopic(int numRecords, int breakAfterNulls) throws InterruptedException
numRecords - the number of records that should be consumed
breakAfterNulls - how many times to wait when no records arrive from the connector
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected AbstractConnectorTest.SourceRecords consumeRecordsByTopic(int numRecords) throws InterruptedException
numRecords - the number of records that should be consumed
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected AbstractConnectorTest.SourceRecords consumeRecordsByTopic(int numRecords, boolean assertRecords) throws InterruptedException
numRecords - the number of records that should be consumed
assertRecords - true if the record serialization should be tested
InterruptedException - if the thread was interrupted while waiting for a record to be returned

protected int consumeAvailableRecords(Consumer<org.apache.kafka.connect.source.SourceRecord> recordConsumer)
recordConsumer - the function that should be called with each consumed record

protected boolean waitForAvailableRecords(long timeout,
TimeUnit unit)
timeout - the maximum amount of time to wait; must not be negative
unit - the time unit for timeout
Returns: true if records are available, or false if the timeout occurred and no records are available

protected void assertConnectorIsRunning()
protected void assertConnectorNotRunning()
protected void assertNoRecordsToConsume()
protected void assertKey(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk)
protected void assertInsert(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk)
protected void assertUpdate(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk)
protected void assertDelete(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk)
protected void assertSourceQuery(org.apache.kafka.connect.source.SourceRecord record,
String query)
protected void assertHasNoSourceQuery(org.apache.kafka.connect.source.SourceRecord record)
protected void assertTombstone(org.apache.kafka.connect.source.SourceRecord record,
String pkField,
int pk)
protected void assertTombstone(org.apache.kafka.connect.source.SourceRecord record)
protected void assertOffset(org.apache.kafka.connect.source.SourceRecord record,
Map<String,?> expectedOffset)
protected void assertOffset(org.apache.kafka.connect.source.SourceRecord record,
String offsetField,
Object expectedValue)
protected void assertValueField(org.apache.kafka.connect.source.SourceRecord record,
String fieldPath,
Object expectedValue)
protected void assertSchemaMatchesStruct(org.apache.kafka.connect.data.SchemaAndValue value)
Struct is valid and its schema
matches that of the supplied schema.
value - the value with a schema; may not be null

protected void assertSchemaMatchesStruct(org.apache.kafka.connect.data.Struct struct,
org.apache.kafka.connect.data.Schema schema)
Struct is valid and its schema
matches that of the supplied schema.
struct - the Struct to validate; may not be null
schema - the expected schema of the Struct; may not be null

protected void assertEngineIsRunning()
protected void validate(org.apache.kafka.connect.source.SourceRecord record)
SourceRecord's key and value can each be converted to a byte[] and then back to an equivalent
SourceRecord.
record - the record to validate; may not be null

protected void print(org.apache.kafka.connect.source.SourceRecord record)
protected void debug(org.apache.kafka.connect.source.SourceRecord record)
protected void assertConfigurationErrors(org.apache.kafka.common.config.Config config,
Field field,
int numErrors)
protected void assertConfigurationErrors(org.apache.kafka.common.config.Config config,
Field field,
int minErrorsInclusive,
int maxErrorsInclusive)
protected void assertConfigurationErrors(org.apache.kafka.common.config.Config config,
Field field)
protected void assertNoConfigurationErrors(org.apache.kafka.common.config.Config config,
Field... fields)
protected org.apache.kafka.common.config.ConfigValue configValue(org.apache.kafka.common.config.Config config,
String fieldName)
protected <T> Map<String,Object> readLastCommittedOffset(Configuration config, Map<String,T> partition)
config - the configuration of the engine used to persist the offsets
partition - the partition

protected <T> Map<Map<String,T>,Map<String,Object>> readLastCommittedOffsets(Configuration config, Collection<Map<String,T>> partitions)
config - the configuration of the engine used to persist the offsets
partitions - the partitions

protected String assertBeginTransaction(org.apache.kafka.connect.source.SourceRecord record)
protected void assertEndTransaction(org.apache.kafka.connect.source.SourceRecord record,
String expectedTxId,
long expectedEventCount,
Map<String,Number> expectedPerTableCount)
protected void assertRecordTransactionMetadata(org.apache.kafka.connect.source.SourceRecord record,
String expectedTxId,
long expectedTotalOrder,
long expectedCollectionOrder)
public static void waitForSnapshotToBeCompleted(String connector, String server) throws InterruptedException
Throws: InterruptedException

public static void waitForStreamingRunning(String connector, String server) throws InterruptedException
Throws: InterruptedException

public static void waitForStreamingRunning(String connector, String server, String contextName) throws InterruptedException
Throws: InterruptedException

Copyright © 2020 JBoss by Red Hat. All rights reserved.