Commit 9a1faab

refactor: Update folder structure and sort import
Signed-off-by: Joel Hanson <[email protected]>
1 parent e6b1e8d commit 9a1faab

18 files changed: +639 -434 lines changed

src/main/java/com/ibm/eventstreams/connect/jdbcsink/JDBCSinkConfig.java

Lines changed: 4 additions & 4 deletions
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2020 IBM Corporation
+ * Copyright 2020, 2023 IBM Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,11 +18,11 @@
 
 package com.ibm.eventstreams.connect.jdbcsink;
 
+import java.util.Map;
+
 import org.apache.kafka.common.config.AbstractConfig;
 import org.apache.kafka.common.config.ConfigDef;
 
-import java.util.Map;
-
 public class JDBCSinkConfig extends AbstractConfig {
 
     private static final String CONFIG_CONNECTION_GROUP = "Connection";
@@ -49,7 +49,7 @@ public class JDBCSinkConfig extends AbstractConfig {
 
     public static final String CONFIG_NAME_INSERT_MODE_DATABASELEVEL = "insert.mode.databaselevel";
     private static final String CONFIG_DOCUMENTATION_INSERT_MODE_DATABASELEVEL = "The insertion mode to use (ex: insert, upsert, or update).";
-    private static final String CONFIG_DISPLAY_INSERT_MODE_DATABASELEVEL = "Insert mode database level";
+    private static final String CONFIG_DISPLAY_INSERT_MODE_DATABASELEVEL = "Insert mode database level";
 
     public static ConfigDef config() {
         ConfigDef config = new ConfigDef();

src/main/java/com/ibm/eventstreams/connect/jdbcsink/JDBCSinkConnector.java

Lines changed: 26 additions & 16 deletions
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2020 IBM Corporation
+ * Copyright 2020, 2023 IBM Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,15 +18,15 @@
 
 package com.ibm.eventstreams.connect.jdbcsink;
 
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.kafka.common.config.Config;
 import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.connect.connector.Task;
 import org.apache.kafka.connect.sink.SinkConnector;
 
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
 public class JDBCSinkConnector extends SinkConnector {
     // TODO: check with ibm-messaging about externalizing snapshot version
     public static String VERSION = "0.0.3-SNAPSHOT";
@@ -38,24 +38,28 @@ public class JDBCSinkConnector extends SinkConnector {
      *
      * @return the version, formatted as a String
      */
-    @Override public String version() {
+    @Override
+    public String version() {
         return VERSION;
     }
 
     /**
-     * Start this Connector. This method will only be called on a clean Connector, i.e. it has
-     * either just been instantiated and initialized or {@link #stop()} has been invoked.
+     * Start this Connector. This method will only be called on a clean Connector,
+     * i.e. it has either just been instantiated and initialized or {@link #stop()}
+     * has been invoked.
      *
      * @param props configuration settings
      */
-    @Override public void start(Map<String, String> props) {
+    @Override
+    public void start(Map<String, String> props) {
         this.props = props;
     }
 
     /**
      * Returns the Task implementation for this Connector.
      */
-    @Override public Class<? extends Task> taskClass() {
+    @Override
+    public Class<? extends Task> taskClass() {
         return JDBCSinkTask.class;
     }
 
@@ -66,33 +70,39 @@ public class JDBCSinkConnector extends SinkConnector {
      * @param maxTasks maximum number of configurations to generate
      * @return configurations for Tasks
      */
-    @Override public List<Map<String, String>> taskConfigs(int maxTasks) {
+    @Override
+    public List<Map<String, String>> taskConfigs(int maxTasks) {
         return Collections.nCopies(maxTasks, props);
     }
 
     /**
      * Stop this connector.
      */
-    @Override public void stop() {
+    @Override
+    public void stop() {
 
     }
 
     /**
      * Define the configuration for the connector.
+     *
      * @return The ConfigDef for this connector.
      */
-    @Override public ConfigDef config() {
+    @Override
+    public ConfigDef config() {
         return JDBCSinkConfig.config();
     }
 
     /**
-     * Provides a default validation implementation which returns a list of allowed configurations
-     * together with configuration errors and recommended values for each configuration.
+     * Provides a default validation implementation which returns a list of allowed
+     * configurations together with configuration errors and recommended values for
+     * each configuration.
      *
      * @param connectorConfigs connector configuration values
     * @return list of allowed configurations
     */
-    @Override public Config validate(Map<String, String> connectorConfigs) {
+    @Override
+    public Config validate(Map<String, String> connectorConfigs) {
         return super.validate(connectorConfigs);
     }
 }
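
Note on the unchanged logic inside these formatting edits: taskConfigs returns Collections.nCopies(maxTasks, props), so every task receives the same property map and all tasks run with identical configuration. A minimal sketch of that behavior, using only the JDK (the class name and property values are illustrative, not part of this commit):

import java.util.Collections;
import java.util.List;
import java.util.Map;

public class TaskConfigsSketch {
    public static void main(String[] args) {
        // Illustrative properties; a real deployment passes the full sink config.
        Map<String, String> props = Map.of(
                "connector.class", "com.ibm.eventstreams.connect.jdbcsink.JDBCSinkConnector",
                "tasks.max", "3");

        // Same call as JDBCSinkConnector.taskConfigs(3): an immutable list
        // repeating the same props reference once per task.
        List<Map<String, String>> taskConfigs = Collections.nCopies(3, props);

        System.out.println(taskConfigs.size());                       // 3
        System.out.println(taskConfigs.get(0) == taskConfigs.get(2)); // true: shared reference
    }
}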

src/main/java/com/ibm/eventstreams/connect/jdbcsink/JDBCSinkTask.java

Lines changed: 49 additions & 28 deletions
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2020 IBM Corporation
+ * Copyright 2020, 2023 IBM Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,75 +18,87 @@
 
 package com.ibm.eventstreams.connect.jdbcsink;
 
-import com.ibm.eventstreams.connect.jdbcsink.database.DatabaseFactory;
-import com.ibm.eventstreams.connect.jdbcsink.database.IDatabase;
+import java.sql.SQLException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.Map;
+
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.connect.connector.Connector;
+import org.apache.kafka.connect.errors.ConnectException;
 import org.apache.kafka.connect.sink.SinkRecord;
 import org.apache.kafka.connect.sink.SinkTask;
 import org.apache.kafka.connect.sink.SinkTaskContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.sql.SQLException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Map;
+import com.ibm.eventstreams.connect.jdbcsink.database.DatabaseFactory;
+import com.ibm.eventstreams.connect.jdbcsink.database.IDatabase;
 
 public class JDBCSinkTask extends SinkTask {
     private static final Logger logger = LoggerFactory.getLogger(JDBCSinkTask.class);
     private static final String classname = JDBCSinkTask.class.getName();
 
     // TODO: needs to be generic and incorporate other database types
-    // needs an interface
+    // needs an interface
     private JDBCSinkConfig config;
 
     public IDatabase database;
 
     int remainingRetries; // init max retries via config.maxRetries ...
 
     /**
-     * Start the Task. This should handle any configuration parsing and one-time setup of the task.
+     * Start the Task. This should handle any configuration parsing and one-time
+     * setup of the task.
+     *
      * @param props initial configuration
      */
-    @Override public void start(Map<String, String> props) {
+    @Override
+    public void start(Map<String, String> props) {
         logger.trace("[{}] Entry {}.start, props={}", Thread.currentThread().getId(), classname, props);
         this.config = new JDBCSinkConfig(props);
 
-        DatabaseFactory databaseFactory = new DatabaseFactory();
+        DatabaseFactory databaseFactory = getDatabaseFactory();
         try {
             this.database = databaseFactory.makeDatabase(this.config);
         } catch (Exception e) {
             logger.error("Failed to build the database {} ", e);
-            e.printStackTrace();
-            throw e;
+            throw new ConnectException(e);
         }
 
         logger.trace("[{}] Exit {}.start", Thread.currentThread().getId(), classname);
     }
 
+    protected DatabaseFactory getDatabaseFactory() {
+        DatabaseFactory databaseFactory = new DatabaseFactory();
+        return databaseFactory;
+    }
+
     /**
      * Put the records in the sink.
      *
-     * If this operation fails, the SinkTask may throw a {@link org.apache.kafka.connect.errors.RetriableException} to
-     * indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to
-     * be stopped immediately. {@link SinkTaskContext#timeout(long)} can be used to set the maximum time before the
-     * batch will be retried.
+     * If this operation fails, the SinkTask may throw a
+     * {@link org.apache.kafka.connect.errors.RetriableException} to indicate that
+     * the framework should attempt to retry the same call again. Other exceptions
+     * will cause the task to be stopped immediately.
+     * {@link SinkTaskContext#timeout(long)} can be used to set the maximum time
+     * before the batch will be retried.
      *
      * @param records the set of records to send
      */
-    @Override public void put(Collection<SinkRecord> records) {
+    @Override
+    public void put(Collection<SinkRecord> records) {
+        logger.trace("[{}] Entry {}.put", Thread.currentThread().getId(), classname);
         if (records.isEmpty()) {
             return;
         }
 
         final SinkRecord first = records.iterator().next();
         final int recordsCount = records.size();
         logger.info("Received {} records. First record kafka coordinates:({}-{}-{}). Writing them to the database...",
-                recordsCount, first.topic(), first.kafkaPartition(), first.kafkaOffset()
-        );
+                recordsCount, first.topic(), first.kafkaPartition(), first.kafkaOffset());
 
         final String tableName = config.getString(JDBCSinkConfig.CONFIG_NAME_TABLE_NAME_FORMAT);
 
@@ -96,28 +108,37 @@ public class JDBCSinkTask extends SinkTask {
             this.database.getWriter().insert(tableName, records);
             logger.info(String.format("%d RECORDS PROCESSED", records.size()));
             Instant finish = Instant.now();
-            long timeElapsed = Duration.between(start, finish).toMillis(); //in millis
-            logger.info(String.format("Processed '%d' records", records.size() ));
+            long timeElapsed = Duration.between(start, finish).toMillis(); // in millis
+            logger.info(String.format("Processed '%d' records", records.size()));
             logger.info(String.format("Total Execution time: %d", timeElapsed));
         } catch (SQLException error) {
             logger.error("Write of {} records failed, remainingRetries={}", recordsCount, remainingRetries, error);
-            // TODO: throw exception to cancel execution or retry?
+            throw new ConnectException(error);
+        } catch (final RuntimeException e) {
+            logger.error("Unexpected runtime exception: ", e);
+            throw e;
         }
+
+        logger.trace("[{}] Exit {}.put", Thread.currentThread().getId(), classname);
     }
 
-    @Override public void stop() {
+    @Override
+    public void stop() {
     }
 
-    @Override public void flush(Map<TopicPartition, OffsetAndMetadata> map) {
+    @Override
+    public void flush(Map<TopicPartition, OffsetAndMetadata> map) {
         // Not necessary
     }
 
     /**
-     * Get the version of this task. Usually this should be the same as the corresponding {@link Connector} class's version.
+     * Get the version of this task. Usually this should be the same as the
+     * corresponding {@link Connector} class's version.
      *
     * @return the version, formatted as a String
     */
-    @Override public String version() {
+    @Override
+    public String version() {
         return getClass().getPackage().getImplementationVersion();
     }
 }
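
Two behavioral changes in this file go beyond reformatting: failures in start() and put() are now rethrown as ConnectException, so the Connect framework fails the task cleanly instead of seeing raw implementation exceptions, and start() obtains its factory through the new protected getDatabaseFactory() method, which is an overridable seam. A minimal sketch of how a unit test might use that seam, assuming test classes in the same packages; StubDatabaseFactory and TestableJDBCSinkTask are hypothetical names, not part of this commit:

package com.ibm.eventstreams.connect.jdbcsink;

import com.ibm.eventstreams.connect.jdbcsink.database.DatabaseFactory;
import com.ibm.eventstreams.connect.jdbcsink.database.IDatabase;

// Hypothetical test doubles, illustrative only.
class StubDatabaseFactory extends DatabaseFactory {
    private final IDatabase stub;

    StubDatabaseFactory(IDatabase stub) {
        this.stub = stub;
    }

    @Override
    public IDatabase makeDatabase(JDBCSinkConfig config) {
        // Bypasses JDBC URL parsing, driver loading, and connection pooling.
        return stub;
    }
}

class TestableJDBCSinkTask extends JDBCSinkTask {
    private final DatabaseFactory factory;

    TestableJDBCSinkTask(DatabaseFactory factory) {
        this.factory = factory;
    }

    @Override
    protected DatabaseFactory getDatabaseFactory() {
        // The seam introduced by this commit: start() now calls this method
        // instead of constructing DatabaseFactory directly.
        return factory;
    }
}

With this in place, start() and put() can be exercised against an in-memory IDatabase without a live JDBC connection.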
src/main/java/com/ibm/eventstreams/connect/jdbcsink/database/DatabaseFactory.java

Lines changed: 32 additions & 17 deletions
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2020 IBM Corporation
+ * Copyright 2020, 2023 IBM Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,15 +18,16 @@
 
 package com.ibm.eventstreams.connect.jdbcsink.database;
 
+import java.beans.PropertyVetoException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.ibm.eventstreams.connect.jdbcsink.JDBCSinkConfig;
 import com.ibm.eventstreams.connect.jdbcsink.database.datasource.IDataSource;
 import com.ibm.eventstreams.connect.jdbcsink.database.datasource.PooledDataSource;
 import com.ibm.eventstreams.connect.jdbcsink.database.exception.DatabaseNotSupportedException;
 import com.ibm.eventstreams.connect.jdbcsink.database.exception.JdbcDriverClassNotFoundException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.beans.PropertyVetoException;
 
 public class DatabaseFactory {
     private static final Logger logger = LoggerFactory.getLogger(DatabaseFactory.class);
@@ -35,39 +36,53 @@ public IDatabase makeDatabase(JDBCSinkConfig config) {
 
         logger.warn("DatabaseFactory: makeDatabase");
 
-        String jdbcUrl = config.getString(JDBCSinkConfig.CONFIG_NAME_CONNECTION_URL);
+        DatabaseType databaseType = getDatabaseType(config);
+
+        String databaseDriver = getDatabaseDriver(databaseType);
+
+        IDataSource dataSource = getDataSource(config, databaseDriver);
 
+        return databaseType.create(dataSource);
+    }
+
+    private DatabaseType getDatabaseType(JDBCSinkConfig config) {
+        String jdbcUrl = config.getString(JDBCSinkConfig.CONFIG_NAME_CONNECTION_URL);
         DatabaseType databaseType = DatabaseType.fromJdbcUrl(jdbcUrl);
 
         if (databaseType == null) {
             throw new DatabaseNotSupportedException("Check " + jdbcUrl);
         }
+        return databaseType;
+    }
 
-        String databaseDriver = databaseType.getDriver();
-        try {
-            Class.forName(databaseDriver);
-        } catch (ClassNotFoundException cnf) {
-            logger.error(databaseType.name() + " JDBC driver not found", cnf);
-            throw new JdbcDriverClassNotFoundException(databaseType.name());
-        }
-
+    private IDataSource getDataSource(JDBCSinkConfig config, String databaseDriver) {
         final String username = config.getString(JDBCSinkConfig.CONFIG_NAME_CONNECTION_USER);
         final String password = config.getPassword(JDBCSinkConfig.CONFIG_NAME_CONNECTION_PASSWORD).value();
         final int poolSize = config.getInt(JDBCSinkConfig.CONFIG_NAME_CONNECTION_DS_POOL_SIZE);
+        String jdbcUrl = config.getString(JDBCSinkConfig.CONFIG_NAME_CONNECTION_URL);
 
         IDataSource dataSource = null;
         try {
             dataSource = new PooledDataSource.Builder(
                     username,
                     password,
                     jdbcUrl,
-                    databaseDriver
-            ).withInitialPoolSize(poolSize).build();
+                    databaseDriver).withInitialPoolSize(poolSize).build();
         } catch (PropertyVetoException e) {
             logger.error(e.toString());
         }
+        return dataSource;
+    }
 
-        return databaseType.create(dataSource);
+    private String getDatabaseDriver(DatabaseType databaseType) {
+        String databaseDriver = databaseType.getDriver();
+        try {
+            Class.forName(databaseDriver);
+        } catch (ClassNotFoundException cnf) {
+            logger.error(databaseType.name() + " JDBC driver not found", cnf);
+            throw new JdbcDriverClassNotFoundException(databaseType.name());
+        }
+        return databaseDriver;
    }
 
 }
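
With the helpers extracted, makeDatabase reduces to three focused steps: resolve the DatabaseType from the JDBC URL, load its driver class, and build a pooled data source. A minimal usage sketch under assumed configuration keys (the property names and the DB2 URL below are illustrative assumptions, not taken from this diff; the types and methods are from it, and any further required sink properties would also need to be set):

package com.ibm.eventstreams.connect.jdbcsink.database;

import java.util.HashMap;
import java.util.Map;

import com.ibm.eventstreams.connect.jdbcsink.JDBCSinkConfig;

// Illustrative driver for the refactored factory.
public class DatabaseFactorySketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("connection.url", "jdbc:db2://localhost:50000/SAMPLE"); // assumed key and URL
        props.put("connection.user", "db2inst1");                         // assumed key
        props.put("connection.password", "passw0rd");                     // assumed key
        props.put("connection.ds.pool.size", "5");                        // assumed key

        JDBCSinkConfig config = new JDBCSinkConfig(props);

        // makeDatabase now runs three focused steps:
        //   getDatabaseType(config)  -> DatabaseType, or DatabaseNotSupportedException
        //   getDatabaseDriver(type)  -> loads the driver class, or JdbcDriverClassNotFoundException
        //   getDataSource(config, d) -> pooled IDataSource
        IDatabase database = new DatabaseFactory().makeDatabase(config);
        System.out.println("Created database: " + database);
    }
}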
