| Package | Description |
|---|---|
| io.debezium.connector.cassandra | |
| Modifier and Type | Field and Description |
|---|---|
| private RowData | Record.rowData |
| Modifier and Type | Method and Description |
|---|---|
| RowData | FieldFilterSelector.FieldFilter.apply(RowData rowData) |
| RowData | RowData.copy() |
| private static RowData | SnapshotProcessor.extractRowData(com.datastax.driver.core.Row row, List<com.datastax.driver.core.ColumnMetadata> columns, Set<String> partitionKeyNames, Set<String> clusteringKeyNames, Object executionTime) — This function extracts the relevant row data from Row and updates the maximum writetime for each row. |
| RowData | Record.getRowData() |
| Modifier and Type | Method and Description |
|---|---|
| RowData | FieldFilterSelector.FieldFilter.apply(RowData rowData) |
| private void | RecordMaker.createRecord(String cluster, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable, boolean snapshot, Instant tsMicro, RowData data, org.apache.kafka.connect.data.Schema keySchema, org.apache.kafka.connect.data.Schema valueSchema, boolean markOffset, io.debezium.function.BlockingConsumer<Record> consumer, Record.Operation operation) |
| void | RecordMaker.delete(String cluster, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable, boolean snapshot, Instant tsMicro, RowData data, org.apache.kafka.connect.data.Schema keySchema, org.apache.kafka.connect.data.Schema valueSchema, boolean markOffset, io.debezium.function.BlockingConsumer<Record> consumer) |
| void | RecordMaker.insert(String cluster, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable, boolean snapshot, Instant tsMicro, RowData data, org.apache.kafka.connect.data.Schema keySchema, org.apache.kafka.connect.data.Schema valueSchema, boolean markOffset, io.debezium.function.BlockingConsumer<Record> consumer) |
| private void | CommitLogReadHandlerImpl.populateClusteringColumns(RowData after, org.apache.cassandra.db.rows.Row row, org.apache.cassandra.db.partitions.PartitionUpdate pu) |
| private void | CommitLogReadHandlerImpl.populatePartitionColumns(RowData after, org.apache.cassandra.db.partitions.PartitionUpdate pu) |
| private void | CommitLogReadHandlerImpl.populateRegularColumns(RowData after, org.apache.cassandra.db.rows.Row row, CommitLogReadHandlerImpl.RowType rowType, SchemaHolder.KeyValueSchema schema) |
| void | RecordMaker.update(String cluster, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable, boolean snapshot, Instant tsMicro, RowData data, org.apache.kafka.connect.data.Schema keySchema, org.apache.kafka.connect.data.Schema valueSchema, boolean markOffset, io.debezium.function.BlockingConsumer<Record> consumer) |
| Constructor and Description |
|---|
| ChangeRecord(SourceInfo source, RowData rowData, org.apache.kafka.connect.data.Schema keySchema, org.apache.kafka.connect.data.Schema valueSchema, Record.Operation op, boolean markOffset) |
| Record(SourceInfo source, RowData rowData, org.apache.kafka.connect.data.Schema keySchema, org.apache.kafka.connect.data.Schema valueSchema, Record.Operation op, boolean shouldMarkOffset, long ts) |
| TombstoneRecord(SourceInfo source, RowData rowData, org.apache.kafka.connect.data.Schema keySchema) |
Copyright © 2021 JBoss by Red Hat. All rights reserved.