Skip to content

Commit

Permalink
log-records-on-table-actions (#256)
Browse files Browse the repository at this point in the history
- log Kafka metadata for the record that creates a table
- log Kafka metadata for the record that causes schema evolution
  • Loading branch information
tabmatfournier authored May 23, 2024
1 parent e7c07bf commit e942a59
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,13 @@
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class IcebergWriter implements RecordWriter {

private static final Logger LOG = LoggerFactory.getLogger(IcebergWriter.class);

private final Table table;
private final String tableName;
private final IcebergSinkConfig config;
Expand Down Expand Up @@ -91,6 +96,7 @@ private Record convertToRow(SinkRecord record) {
flush();
// apply the schema updates, this will refresh the table
SchemaUtils.applySchemaUpdates(table, updates);
LOG.info("Table schema evolution on table {} caused by record at topic: {}, partition: {}, offset: {}", table.name(), record.topic(), record.kafkaPartition(), record.kafkaOffset());
// initialize a new writer with the new schema
initNewWriter();
// convert the row again, this time using the new table schema
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ Table autoCreateTable(String tableName, SinkRecord sample) {
result.set(
catalog.createTable(
identifier, schema, partitionSpec, config.autoCreateProps()));
LOG.info("Created new table {} from record at topic: {}, partition: {}, offset: {}", identifier, sample.topic(), sample.kafkaPartition(), sample.kafkaOffset());
}
});
return result.get();
Expand Down

0 comments on commit e942a59

Please sign in to comment.